X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-mips%2Fatomic.h;h=c1a2409bb52a171fc1c1c0b9070b7791f350e812;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=7d89e87bc8c6fd9544ef3b247f57421248741396;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 7d89e87bc..c1a2409bb 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -9,25 +9,16 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
  */
-
-/*
- * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
- * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
- * main big wrapper ...
- */
-#include <linux/spinlock.h>
-#include <linux/config.h>
-
 #ifndef _ASM_ATOMIC_H
 #define _ASM_ATOMIC_H
 
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
-extern spinlock_t atomic_lock;
-
 typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
@@ -62,28 +53,32 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %1		# atomic_add		\n"
 		"	addu	%0, %2					\n"
 		"	sc	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %1		# atomic_add		\n"
 		"	addu	%0, %2					\n"
 		"	sc	%0, %1					\n"
 		"	beqz	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter += i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -100,28 +95,32 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %1		# atomic_sub		\n"
 		"	subu	%0, %2					\n"
 		"	sc	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%0, %1		# atomic_sub		\n"
 		"	subu	%0, %2					\n"
 		"	sc	%0, %1					\n"
 		"	beqz	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter -= i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -132,16 +131,19 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -149,25 +151,28 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -175,16 +180,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -192,50 +200,60 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
 /*
- * atomic_sub_if_positive - add integer to atomic variable
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
  */
 static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
 		"	sc	%0, %2					\n"
+		"	.set	noreorder				\n"
 		"	beqzl	%0, 1b					\n"
-		"	sync						\n"
+		"	subu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
 		"1:							\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -243,30 +261,58 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
 		"	sc	%0, %2					\n"
+		"	.set	noreorder				\n"
 		"	beqz	%0, 1b					\n"
-		"	sync						\n"
+		"	subu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
 		"1:							\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
@@ -334,9 +380,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  */
 #define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
 
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
 
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
@@ -367,28 +413,32 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %1		# atomic64_add		\n"
 		"	addu	%0, %2					\n"
 		"	scd	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %1		# atomic64_add		\n"
 		"	addu	%0, %2					\n"
 		"	scd	%0, %1					\n"
 		"	beqz	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter += i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -405,28 +455,32 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %1		# atomic64_sub		\n"
 		"	subu	%0, %2					\n"
 		"	scd	%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%0, %1		# atomic64_sub		\n"
 		"	subu	%0, %2					\n"
 		"	scd	%0, %1					\n"
 		"	beqz	%0, 1b					\n"
+		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		v->counter -= i;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -437,16 +491,19 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -454,25 +511,28 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result += i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -480,16 +540,19 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -497,50 +560,60 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
 /*
- * atomic64_sub_if_positive - add integer to atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
 */
 static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
 		"	scd	%0, %2					\n"
+		"	.set	noreorder				\n"
 		"	beqzl	%0, 1b					\n"
-		"	sync						\n"
+		"	dsubu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
 		"1:							\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
@@ -548,27 +621,33 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		unsigned long temp;
 
 		__asm__ __volatile__(
+		"	.set	mips3					\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
 		"	scd	%0, %2					\n"
+		"	.set	noreorder				\n"
 		"	beqz	%0, 1b					\n"
-		"	sync						\n"
+		"	dsubu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
 		"1:							\n"
+		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else {
 		unsigned long flags;
 
-		spin_lock_irqsave(&atomic_lock, flags);
+		local_irq_save(flags);
 		result = v->counter;
 		result -= i;
 		if (result >= 0)
 			v->counter = result;
-		spin_unlock_irqrestore(&atomic_lock, flags);
+		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -639,7 +718,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
  */
 #define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
 
-#endif /* CONFIG_MIPS64 */
+#endif /* CONFIG_64BIT */
 
 /*
  * atomic*_return operations are serializing but not the non-*_return
@@ -650,4 +729,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_ATOMIC_H */
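
For context on how the helpers introduced above are meant to be consumed: atomic_add_unless() retries atomic_cmpxchg() until it either observes the forbidden value @u or successfully installs c + (a), and atomic_inc_not_zero() is the usual refcounting shorthand built on top of it. Below is a minimal kernel-style sketch of a typical caller, not part of this patch; struct obj, obj_get() and obj_put() are made-up names for illustration only.

```c
/* Illustrative sketch only -- not part of the patch above. */
#include <linux/slab.h>		/* kfree() */
#include <asm/atomic.h>		/* the header being patched */

struct obj {
	atomic_t refcount;	/* object is freed once this drops to zero */
};

/*
 * Take a reference only if the object is still live.  atomic_inc_not_zero()
 * returns 0 if the count has already reached zero, so an object that is
 * concurrently being torn down is never resurrected.
 */
static int obj_get(struct obj *o)
{
	return atomic_inc_not_zero(&o->refcount);
}

/* Drop a reference; the last put frees the object. */
static void obj_put(struct obj *o)
{
	/*
	 * atomic_dec_and_test() is a value-returning operation, so it is
	 * serializing: it implies the smp_mb() barriers this patch adds
	 * around the *_return primitives.
	 */
	if (atomic_dec_and_test(&o->refcount))
		kfree(o);
}
```

The cmpxchg loop makes the "get a reference only if someone still holds one" idiom lock-free, which matches the direction of the rest of the patch: the uniprocessor fallback drops the global spinlock in favour of local_irq_save()/local_irq_restore(), and the hard-coded sync instructions are replaced by smp_mb() calls around the serializing operations.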