X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fsparc64%2Flib%2Fatomic.S;h=9633750167d06b3e9f4d285e105656d01a8ce32f;hb=refs%2Fheads%2Fvserver;hp=26463d8a467ae8cc0923a37b0d1719e15fb91d6d;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
index 26463d8a4..963375016 100644
--- a/arch/sparc64/lib/atomic.S
+++ b/arch/sparc64/lib/atomic.S
@@ -7,56 +7,142 @@
 #include <asm/asi.h>
 
 	.text
-	.align	64
-
-	.globl	__atomic_add
-	.type	__atomic_add,#function
-__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
-	lduw	[%o1], %g5
-	add	%g5, %o0, %g7
-	cas	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_add
-	 membar	#StoreLoad | #StoreStore
-	retl
-	 add	%g7, %o0, %o0
-	.size	__atomic_add, .-__atomic_add
-
-	.globl	__atomic_sub
-	.type	__atomic_sub,#function
-__atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
-	lduw	[%o1], %g5
-	sub	%g5, %o0, %g7
-	cas	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_sub
-	 membar	#StoreLoad | #StoreStore
-	retl
-	 sub	%g7, %o0, %o0
-	.size	__atomic_sub, .-__atomic_sub
-
-	.globl	__atomic64_add
-	.type	__atomic64_add,#function
-__atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
-	ldx	[%o1], %g5
-	add	%g5, %o0, %g7
-	casx	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%xcc, __atomic64_add
-	 membar	#StoreLoad | #StoreStore
-	retl
-	 add	%g7, %o0, %o0
-	.size	__atomic64_add, .-__atomic64_add
-
-	.globl	__atomic64_sub
-	.type	__atomic64_sub,#function
-__atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
-	ldx	[%o1], %g5
-	sub	%g5, %o0, %g7
-	casx	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%xcc, __atomic64_sub
-	 membar	#StoreLoad | #StoreStore
-	retl
-	 sub	%g7, %o0, %o0
-	.size	__atomic64_sub, .-__atomic64_sub
+
+	/* Two versions of the atomic routines, one that
+	 * does not return a value and does not perform
+	 * memory barriers, and a second which returns
+	 * a value and does the barriers.
+	 */
+	.globl	atomic_add
+	.type	atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_add, .-atomic_add
+
+	.globl	atomic_sub
+	.type	atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_sub, .-atomic_sub
+
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+
+#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad;
+#define ATOMIC_POST_BARRIER	\
+	ba,pt %xcc, 80b;	\
+	membar #StoreLoad | #StoreStore
+
+80:	retl
+	 nop
+#else
+#define ATOMIC_PRE_BARRIER
+#define ATOMIC_POST_BARRIER
+#endif
+
+	.globl	atomic_add_ret
+	.type	atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 add	%g7, %o0, %g7
+	sra	%g7, 0, %o0
+	ATOMIC_POST_BARRIER
+	retl
+	 nop
+	.size	atomic_add_ret, .-atomic_add_ret
+
+	.globl	atomic_sub_ret
+	.type	atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 sub	%g7, %o0, %g7
+	sra	%g7, 0, %o0
+	ATOMIC_POST_BARRIER
+	retl
+	 nop
+	.size	atomic_sub_ret, .-atomic_sub_ret
+
+	.globl	atomic64_add
+	.type	atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_add, .-atomic64_add
+
+	.globl	atomic64_sub
+	.type	atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_sub, .-atomic64_sub
+
+	.globl	atomic64_add_ret
+	.type	atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 add	%g7, %o0, %g7
+	mov	%g7, %o0
+	ATOMIC_POST_BARRIER
+	retl
+	 nop
+	.size	atomic64_add_ret, .-atomic64_add_ret
+
+	.globl	atomic64_sub_ret
+	.type	atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 sub	%g7, %o0, %g7
+	mov	%g7, %o0
+	ATOMIC_POST_BARRIER
+	retl
+	 nop
+	.size	atomic64_sub_ret, .-atomic64_sub_ret
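
Note (not part of the patch): every routine above is the same compare-and-swap retry loop; the *_ret variants additionally recompute old + increment (or old - decrement) to return the post-update value, bracketed by the ATOMIC_PRE/POST_BARRIER membars on SMP. A minimal C sketch of that loop, using the GCC/Clang __atomic builtins as a stand-in for lduw/cas and the membars; the function name is hypothetical and the memory orders are only an approximation of the barriers used here:

/* Illustrative sketch of the atomic_add_ret retry loop above. */
#include <stdint.h>

static int32_t sketch_atomic_add_ret(int32_t increment, int32_t *ptr)
{
	int32_t old_val, new_val;

	do {
		old_val = __atomic_load_n(ptr, __ATOMIC_RELAXED);	/* lduw	[%o1], %g1 */
		new_val = old_val + increment;				/* add	%g1, %o0, %g7 */
		/* cas: store new_val only if *ptr still equals old_val;
		 * on failure old_val is refreshed and we retry (bne,pn 1b). */
	} while (!__atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	return new_val;	/* value after the update, as returned in %o0 */
}

In the kernel these entry points are reached through the atomic_t wrappers (e.g. atomic_add_return() mapping onto atomic_add_ret) declared in the sparc64 atomic.h header, not called directly.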