X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fdec_and_lock.c;h=a65c314555416d9f1ea262455d1da313e1959902;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=f84d1608bef55e48a0c102999d9b3f3f87fbe696;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index f84d1608b..a65c31455 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -3,10 +3,8 @@
 #include <asm/atomic.h>
 
 /*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
  *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
@@ -18,17 +16,15 @@
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
-int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
@@ -36,5 +32,4 @@ int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 	return 0;
 }
 
-EXPORT_SYMBOL(atomic_dec_and_lock);
-#endif
+EXPORT_SYMBOL(_atomic_dec_and_lock);
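
For context, the sketch below shows the usage pattern this helper exists for; it is not part of the patch, and the names (struct foo, foo_put, foo_list, foo_list_lock) are invented for illustration. Callers normally reach this code through the atomic_dec_and_lock() wrapper rather than _atomic_dec_and_lock() directly. The reason the NOTE above insists the decrement and the lock must be one atomic operation is the usual lookup-table race: the naive sequence could drop the count to zero and only then take the lock, leaving a window in which another CPU could find the object through the locked list and take a new reference to memory that is about to be freed. atomic_dec_and_lock() closes that window by returning with the lock already held whenever the count reached zero.

/*
 * Illustrative sketch only -- struct foo, foo_list, foo_list_lock and
 * foo_put() are hypothetical and not part of this patch.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct foo {
	atomic_t refcount;
	struct list_head node;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_list_lock);

static void foo_put(struct foo *f)
{
	/*
	 * atomic_dec_and_lock() returns 1 with foo_list_lock held only
	 * when this call dropped the last reference, so no other CPU can
	 * look the object up on foo_list and re-take a reference while
	 * we unlink and free it.
	 */
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		list_del(&f->node);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}

With the patched fast path, on SMP a call like the one above that does not drop the last reference should complete via atomic_add_unless(atomic, -1, 1) without touching foo_list_lock at all; only the final put falls through to the locked slow path.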