X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fsparc64%2Flib%2Fatomic.S;h=41be4131f8008429687bd5b84953e95981803a77;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=26463d8a467ae8cc0923a37b0d1719e15fb91d6d;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
index 26463d8a4..41be4131f 100644
--- a/arch/sparc64/lib/atomic.S
+++ b/arch/sparc64/lib/atomic.S
@@ -4,59 +4,136 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/config.h>
 #include <asm/asi.h>
 
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define ATOMIC_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define ATOMIC_PRE_BARRIER	nop
+#define ATOMIC_POST_BARRIER	nop
+#endif
+
 	.text
-	.align	64
 
-	.globl	__atomic_add
-	.type	__atomic_add,#function
-__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
-	lduw	[%o1], %g5
+	/* Two versions of the atomic routines, one that
+	 * does not return a value and does not perform
+	 * memory barriers, and a second which returns
+	 * a value and does the barriers.
+	 */
+	.globl	atomic_add
+	.type	atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g5
+	add	%g5, %o0, %g7
+	cas	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_add, .-atomic_add
+
+	.globl	atomic_sub
+	.type	atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g5
+	sub	%g5, %o0, %g7
+	cas	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_sub, .-atomic_sub
+
+	.globl	atomic_add_ret
+	.type	atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g5
 	add	%g5, %o0, %g7
 	cas	[%o1], %g5, %g7
 	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_add
-	 membar	#StoreLoad | #StoreStore
+	bne,pn	%icc, 1b
+	 add	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
 	retl
-	 add	%g7, %o0, %o0
-	.size	__atomic_add, .-__atomic_add
+	 sra	%g7, 0, %o0
+	.size	atomic_add_ret, .-atomic_add_ret
 
-	.globl	__atomic_sub
-	.type	__atomic_sub,#function
-__atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
-	lduw	[%o1], %g5
+	.globl	atomic_sub_ret
+	.type	atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g5
 	sub	%g5, %o0, %g7
 	cas	[%o1], %g5, %g7
 	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_sub
-	 membar	#StoreLoad | #StoreStore
+	bne,pn	%icc, 1b
+	 sub	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0
+	.size	atomic_sub_ret, .-atomic_sub_ret
+
+	.globl	atomic64_add
+	.type	atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g5
+	add	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_add, .-atomic64_add
+
+	.globl	atomic64_sub
+	.type	atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g5
+	sub	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, 1b
+	 nop
 	retl
-	 sub	%g7, %o0, %o0
-	.size	__atomic_sub, .-__atomic_sub
+	 nop
+	.size	atomic64_sub, .-atomic64_sub
 
-	.globl	__atomic64_add
-	.type	__atomic64_add,#function
-__atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
-	ldx	[%o1], %g5
+	.globl	atomic64_add_ret
+	.type	atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g5
 	add	%g5, %o0, %g7
 	casx	[%o1], %g5, %g7
 	cmp	%g5, %g7
-	bne,pn	%xcc, __atomic64_add
-	 membar	#StoreLoad | #StoreStore
+	bne,pn	%xcc, 1b
+	 add	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
 	retl
-	 add	%g7, %o0, %o0
-	.size	__atomic64_add, .-__atomic64_add
+	 mov	%g7, %o0
+	.size	atomic64_add_ret, .-atomic64_add_ret
 
-	.globl	__atomic64_sub
-	.type	__atomic64_sub,#function
-__atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
-	ldx	[%o1], %g5
+	.globl	atomic64_sub_ret
+	.type	atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g5
 	sub	%g5, %o0, %g7
 	casx	[%o1], %g5, %g7
 	cmp	%g5, %g7
-	bne,pn	%xcc, __atomic64_sub
-	 membar	#StoreLoad | #StoreStore
+	bne,pn	%xcc, 1b
+	 sub	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
 	retl
-	 sub	%g7, %o0, %o0
-	.size	__atomic64_sub, .-__atomic64_sub
+	 mov	%g7, %o0
+	.size	atomic64_sub_ret, .-atomic64_sub_ret
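For readers less familiar with SPARC assembly, here is a minimal C sketch (not part of the patch) of the compare-and-swap retry loop that the *_ret routines above implement with cas/casx: load the old value, compute the new one, attempt the swap, and retry until no other CPU has changed the word in between. The function name my_atomic_add_ret is hypothetical, and GCC's __sync_val_compare_and_swap builtin stands in for the cas instruction; that builtin is also a full memory barrier, so it subsumes the ATOMIC_PRE_BARRIER/ATOMIC_POST_BARRIER membars the SMP build places around the returning variants.

#include <stdio.h>

/* Hypothetical illustration only; the kernel uses the assembly routines above. */
static int my_atomic_add_ret(int increment, int *ptr)
{
	int oldval, newval;

	do {
		oldval = *ptr;			/* lduw	[%o1], %g5     */
		newval = oldval + increment;	/* add	%g5, %o0, %g7  */
		/* cas [%o1], %g5, %g7: store newval only if *ptr still
		 * equals oldval; either way the prior memory contents
		 * are returned, so a mismatch means another CPU raced
		 * us and we must retry.                               */
	} while (__sync_val_compare_and_swap(ptr, oldval, newval) != oldval);

	return oldval + increment;		/* add %g7, %o0, %g7; sra %g7, 0, %o0 */
}

int main(void)
{
	int counter = 40;

	printf("%d\n", my_atomic_add_ret(2, &counter));	/* prints 42 */
	return 0;
}

The non-returning atomic_add/atomic_sub variants in the patch are the same loop without the barriers and without the final return value, which is why they can use plain nop in the delay slots.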