diff --git a/arch/sparc64/lib/U3copy_to_user.S b/arch/sparc64/lib/U3copy_to_user.S
index 715227156..6b421fc2f 100644
--- a/arch/sparc64/lib/U3copy_to_user.S
+++ b/arch/sparc64/lib/U3copy_to_user.S
@@ -1,15 +1,15 @@
-/* U3copy_to_user.S: UltraSparc-III optimized memcpy.
+/* $Id: U3copy_to_user.S,v 1.3 2000/11/01 09:29:19 davem Exp $
+ * U3memcpy.S: UltraSparc-III optimized copy to userspace.
  *
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
  */
 
+#ifdef __KERNEL__
 #include <asm/visasm.h>
 #include <asm/asi.h>
 #include <asm/dcu.h>
 #include <asm/spitfire.h>
-
-#define XCC xcc
-
+#undef SMALL_COPY_USES_FPU
 #define EXNV(x,y,a,b) \
 98: x,y; \
 .section .fixup; \
 .align 4; \
 99: a, b, %o0; \
 retl; \
 add %o0, 4, %o0; \
 .section __ex_table; \
 .align 4; \
 .word 98b, 99b; \
 .text; \
 .align 4;
@@ -34,18 +34,6 @@
 .text; \
 .align 4;
 #define EXNV3(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: a, b, %o0; \
- retl; \
- add %o0, 4, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXNV4(x,y,a,b) \
 98: x,y; \
 .section .fixup; \
 .align 4; \
 99: a, b, %o0; \
 retl; \
 add %o0, 4, %o0; \
 .section __ex_table; \
 .align 4; \
 .word 98b, 99b; \
 .text; \
 .align 4;
@@ -124,9 +112,22 @@
 .word 98b, 99b; \
 .text; \
 .align 4;
-
- .register %g2,#scratch
- .register %g3,#scratch
+#else
+#define ASI_AIUS 0x80
+#define ASI_BLK_AIUS 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EXNV2(x,y,a,b) x,y;
+#define EXNV3(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EXBLK1(x,y) x,y;
+#define EXBLK2(x,y) x,y;
+#define EXBLK3(x,y) x,y;
+#define EXBLK4(x,y) x,y;
+#endif
 
 /* Special/non-trivial issues of this code:
  *
@@ -147,269 +148,400 @@
  * of up to 2.4GB per second.
  */
 
- .globl U3copy_to_user
-U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_to_user
+U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
 /* Writing to %asi is _expensive_ so we hardcode it.
  * Reading %asi to check for KERNEL_DS is comparatively
  * cheap.
  */
- rd %asi, %g1
- cmp %g1, ASI_AIUS
- bne,pn %icc, U3memcpy_user_stub
- nop
-
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 80f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
-
- /* Here len >= 256 and condition codes reflect execution
+ rd %asi, %g1 ! MS Group (4 cycles)
+ cmp %g1, ASI_AIUS ! A0 Group
+ bne U3memcpy ! BR
+ nop ! A1
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_to_user_short_ret ! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_to_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_to_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_to_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3copy_to_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ ldub [%o1 + 0x00], %o3 ! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_to_user_short ! BR
+ EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
+
+U3copy_to_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= (6 * 64) and condition codes reflect execution
  * of "andcc %o0, 0x7, %g2", done by caller.
  */
 .align 64
-1:
+U3copy_to_user_enter:
 /* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
 
 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
  * of bytes to copy to make 'dst' 64-byte aligned. We pre-
  * subtract this from 'len'.
  */
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
 
 /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
-
- bg,pt %XCC, 1b
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+1: ldub [%o1 + 0x00], %o3 ! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
 
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
 
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetch [%o1 + 0x000], #one_read
- prefetch [%o1 + 0x040], #one_read
- andn %o2, (0x40 - 1), %o4
- prefetch [%o1 + 0x080], #one_read
- prefetch [%o1 + 0x0c0], #one_read
- ldd [%o1 + 0x000], %f0
- prefetch [%o1 + 0x100], #one_read
- ldd [%o1 + 0x008], %f2
- prefetch [%o1 + 0x140], #one_read
- ldd [%o1 + 0x010], %f4
- prefetch [%o1 + 0x180], #one_read
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x018], %f6
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x020], %f8
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x028], %f10
- faligndata %f6, %f8, %f22
-
- ldd [%o1 + 0x030], %f12
- faligndata %f8, %f10, %f24
- ldd [%o1 + 0x038], %f14
- faligndata %f10, %f12, %f26
- ldd [%o1 + 0x040], %f0
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_to_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
 
 .align 64
-1:
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS)
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
-
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
-
- ldd [%o1 + 0x040], %f0
- prefetch [%o1 + 0x180], #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3copy_to_user_begin:
+#ifdef __KERNEL__
+ .globl U3copy_to_user_nop_1_6
+U3copy_to_user_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetch [%o1 + 0x000], #one_read ! MS Group1
+ prefetch [%o1 + 0x040], #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetch [%o1 + 0x080], #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetch [%o1 + 0x0c0], #one_read ! MS Group4
+ ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x100], #one_read ! MS Group6
+1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetch [%o1 + 0x140], #one_read ! MS Group7
+1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetch [%o1 + 0x180], #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_to_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_to_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
+U3copy_to_user_loop1:
+ ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
+ prefetch [%o1 + 0x180], #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_to_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_to_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop performs on the copy, no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_to_user_loop2:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ ldd [%o1 + 0x010], %f4 ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ ldd [%o1 + 0x040], %f0 ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_to_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
 
 /* Finally we copy the last full 64-byte block. */
- ldd [%o1 + 0x008], %f2
- faligndata %f12, %f14, %f28
- ldd [%o1 + 0x010], %f4
- faligndata %f14, %f0, %f30
- EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS)
- ldd [%o1 + 0x018], %f6
- faligndata %f0, %f2, %f16
- ldd [%o1 + 0x020], %f8
- faligndata %f2, %f4, %f18
- ldd [%o1 + 0x028], %f10
- faligndata %f4, %f6, %f20
- ldd [%o1 + 0x030], %f12
- faligndata %f6, %f8, %f22
- ldd [%o1 + 0x038], %f14
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- ldd [%o1 + 0x040], %f0
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS)
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
-
- membar #Sync
+U3copy_to_user_loopfini:
+ ldd [%o1 + 0x008], %f2 ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ ldd [%o1 + 0x010], %f4 ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
+ ldd [%o1 + 0x018], %f6 ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ ldd [%o1 + 0x020], %f8 ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ ldd [%o1 + 0x028], %f10 ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ ldd [%o1 + 0x030], %f12 ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ ldd [%o1 + 0x038], %f14 ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ ldd [%o1 + 0x040], %f0 ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3copy_to_user_nop_2_3
+U3copy_to_user_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
 
 /* Now we copy the (len modulo 64) bytes at the end.
  * Note how we borrow the %f0 loaded above.
  *
  * Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer just like similar
+ * code found in U3copy_to_user_toosmall processing.
  */
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 2f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 2f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- ldd [%o1 + 0x00], %f0
-
-1: ldd [%o1 + 0x08], %f2
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
- be,pn %XCC, 2f
- add %o0, 0x8, %o0
- ldd [%o1 + 0x08], %f0
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3copy_to_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_to_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_to_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ ldd [%o1 + 0x00], %f0 ! MS
+
+1: ldd [%o1 + 0x08], %f2 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_to_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ ldd [%o1 + 0x08], %f0 ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
 
 /* If anything is left, we copy it one byte at a time.
  * Note that %g1 is (src & 0x3) saved above before the
  * alignaddr was performed.
  */
-2:
+U3copy_to_user_endcruft:
 cmp %o2, 0
 add %o1, %g1, %o1
 VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3copy_to_user_short_ret
 nop
- ldx [%o1], %o5
- EXNV(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3copy_to_user_short
 
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- lduw [%o1], %o5
- EXNV(stwa %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x7)
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < (6 * 64) */
+U3copy_to_user_toosmall:
 
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- lduh [%o1], %o5
- EXNV(stha %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x3)
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
 
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- ldub [%o1], %o5
- ba,pt %xcc, 85f
- EXNV(stba %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x1)
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- ldx [%o1], %o5
- EXNV4(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %o4)
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- lduw [%o1], %o5
- EXNV3(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
 
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
 
-1:
- subcc %o2, 4, %o2
- lduw [%o1], %g1
- EXNV3(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
 
-85: retl
- clr %o0
+ bg,pt %icc, 1b ! BR Group
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
 
- .align 32
-90:
- subcc %o2, 1, %o2
- ldub [%o1], %g1
- EXNV2(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- clr %o0
+2: VISEntryHalf ! MS+MS
+
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: ldd [%g1 + 0x00], %f2 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ ldd [%g1 + 0x00], %f0 ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_to_user_short_ret ! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_to_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_to_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: ldx [%o1 + 0x00], %o3
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_to_user_short
+ nop
+ ba,a,pt %xcc, U3copy_to_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
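
The 64-byte alignment prologue in U3copy_to_user_enter computes its byte count
as abs((dst & 0x3f) - 0x40) with two subtractions instead of a branchy absolute
value. The following is a minimal C sketch of that computation and of the
overall prologue/block/tail shape of the routine for len >= 0x100; the function
names are illustrative only and do not exist in the kernel, and memcpy() stands
in for the stda %f16..%f30 block stores through ASI_BLK_AIUS.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* "sub %g2, 0x40, %g2; sub %g0, %g2, %g2" yields 0x40 - (dst & 0x3f);
 * the be,pt branch above those subtractions skips the prologue entirely
 * when dst is already 64-byte aligned. */
static size_t bytes_until_64byte_aligned(uintptr_t dst)
{
	size_t g2 = dst & 0x3f;		/* andcc %o0, 0x3f, %g2 */

	return g2 ? 0x40 - g2 : 0;
}

/* Shape of U3copy_to_user_enter .. U3copy_to_user_loopend: byte-copy up
 * to the block boundary, stream full 64-byte blocks, then mop up the
 * "(len modulo 64) bytes at the end" (the endcruft path). */
static void u3_copy_shape(unsigned char *dst, const unsigned char *src,
			  size_t len)
{
	size_t pre = bytes_until_64byte_aligned((uintptr_t)dst);
	size_t blocks;

	len -= pre;			/* "We pre-subtract this from 'len'" */
	while (pre--)
		*dst++ = *src++;
	for (blocks = len >> 6; blocks--; dst += 64, src += 64)
		memcpy(dst, src, 64);	/* 64-byte block store */
	for (len &= 0x3f; len--; )	/* endcruft */
		*dst++ = *src++;
}

The sketch deliberately omits the software-pipelined FPU loads, the prefetch
queue, and fault handling, which are the whole point of the assembly version.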
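Because src need not share dst's alignment, the loops load only aligned 8-byte
doublewords (alignaddr rounds %o1 down and latches src & 7) and splice each
adjacent pair with faligndata before every aligned store. A rough C model of
that splice, assuming SPARC's big-endian byte numbering; faligndata_model is an
invented name and the real instruction takes its offset from the GSR rather
than an argument:

#include <stdint.h>

/* prev and next are consecutive aligned doublewords from src; off is
 * the byte offset (src & 7) that alignaddr latched.  The result is
 * bytes off..off+7 of the 16-byte concatenation prev:next. */
static uint64_t faligndata_model(uint64_t prev, uint64_t next, unsigned off)
{
	unsigned shift = off * 8;

	if (shift == 0)		/* avoid a shift by 64, undefined in C */
		return prev;
	return (prev << shift) | (next >> (64 - shift));
}

This is also why the loops keep two live doublewords (%f0/%f2 alternating): the
previous load is always needed to produce the next spliced store.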
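The EXNV*/EX/EXBLK* wrappers attach a .fixup stub and an __ex_table entry to
each store into userspace; when a store faults, the stub's "a, b, %o0"
computation rebuilds the routine's return value, which follows the kernel's
copy_to_user() contract of returning the number of bytes not copied (zero on
success). A hedged C analogue of that contract, with the fault merely simulated
by an index rather than taken through a trap:

#include <stddef.h>

/* Stop at the first byte that would fault and report the residue,
 * as the .fixup stubs do. */
static size_t copy_to_user_like(unsigned char *dst, const unsigned char *src,
				size_t len, size_t fault_at)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (i == fault_at)
			return len - i;	/* what the fixup rebuilds in %o0 */
		dst[i] = src[i];
	}
	return 0;			/* success: "retl; clr %o0" */
}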