X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fdec_and_lock.c;h=a65c314555416d9f1ea262455d1da313e1959902;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=6658d81e1836517107b5b8b39aa0c7e65032ec6e;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 6658d81e1..a65c31455 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -3,10 +3,8 @@
 #include <asm/atomic.h>
 
 /*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
  *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
@@ -18,17 +16,15 @@
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
@@ -37,4 +33,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
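
Note on the API this file implements (commentary, not part of the patch above):
the contract of atomic_dec_and_lock() is "drop one reference; if that drops the
count to zero, return 1 with the lock held". A minimal usage sketch follows;
the my_obj type, list, and lock are hypothetical names invented for
illustration, not anything in the kernel tree:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct my_obj {
	atomic_t refcount;
	struct list_head node;	/* on my_obj_list, protected by the lock */
};

static LIST_HEAD(my_obj_list);
static DEFINE_SPINLOCK(my_obj_list_lock);

static void my_obj_put(struct my_obj *obj)
{
	/*
	 * If this was the last reference, we return holding
	 * my_obj_list_lock, so no concurrent lookup can take a new
	 * reference between "count hit zero" and the unlink below.
	 */
	if (atomic_dec_and_lock(&obj->refcount, &my_obj_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_obj_list_lock);
		kfree(obj);
	}
}

This is exactly the pattern the NOTE in the header comment warns about: an
unlocked atomic_dec_and_test() followed by spin_lock() would leave a window
in which a lookup could re-take a reference to an object already committed
to being freed.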
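
Note on the new SMP fast path (again commentary, not part of the patch):
atomic_add_unless(v, a, u) atomically adds a to v unless v reads u, and
returns non-zero iff the add was performed. With (atomic, -1, 1) it
decrements the count without ever touching the lock, except when the count
is 1, forcing the final 1 -> 0 transition through the locked slow path. A
sketch of those semantics as a compare-and-swap loop, for illustration
only (the kernel's real implementation is per-architecture):

/* Illustration only: add 'a' to 'v' unless it currently reads 'u';
 * return non-zero iff the add was performed. */
static int add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost a race: retry with the fresh value */
	return c != u;
}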