-/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
+/* $Id: U3copy_from_user.S,v 1.4 2002/01/15 07:16:26 davem Exp $
+ * U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
*
- * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
*/
+#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-
-#define XCC xcc
-
-#define EXNV_RAW(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: ba U3cfu_fixup; \
- a, b, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
+#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: add %o1, %o3, %o0; \
+99: VISExitHalf; \
ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV4(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: add %o1, %o3, %o0; \
- a, b, %o1; \
- ba U3cfu_fixup; \
- add %o1, 4, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXNV8(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: add %o1, %o3, %o0; \
- a, b, %o1; \
- ba U3cfu_fixup; \
- add %o1, 8, %o1; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-
- .register %g2,#scratch
- .register %g3,#scratch
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF 0x04
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#define SMALL_COPY_USES_FPU
+#define EXNV(x,y,a,b) x,y;
+#define EX(x,y,a,b) x,y;
+#define EX2(x,y) x,y;
+#define EX3(x,y) x,y;
+#define EX4(x,y) x,y;
+#endif
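 /* How the EXNV/EX fault wrappers work, as an illustrative C
 * sketch (simplified; the entry layout and handler flow are
 * assumptions, not taken from this file).  Each 98:/99: pair
 * records the faulting load's PC and a fixup stub in __ex_table;
 * the stub leaves the not-yet-copied length in %o1 ("a, b, %o1")
 * and branches to U3cfu_fixup.
 *
 *	struct exception_table_entry { unsigned int insn, fixup; };
 *
 *	// In the fault handler, roughly:
 *	//	entry = search_extable(regs->tpc);  // match a 98: PC
 *	//	if (entry)
 *	//		regs->tpc = entry->fixup;   // resume at 99:
 */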
 /* Special/non-trivial issues of this code:
 *
 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
 * 2) Only low 32 FPU registers are used so that only the
 *    lower half of the FPU register set is dirtied by this
 *    code.  This is especially important in the kernel.
 * 3) This code never prefetches cachelines past the end
 *    of the source buffer.
 *
 * The cheetah's flexible spine, oversized claws, and whip-like
 * tail allow it to run at speeds of up to 2.4GB per second.
 */
- .globl U3copy_from_user
-U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
- cmp %o2, 0
- be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
- bleu,a,pn %XCC, 80f
- or %o3, %o2, %o3
-
- cmp %o2, 256
- blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
-
- ba,pt %xcc, 1f
- andcc %o0, 0x3f, %g2
-
- /* Here len >= 256 and condition codes reflect execution
+ .globl U3copy_from_user
+U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
+#ifndef __KERNEL__
+ /* Save away original 'dst' for memcpy return value. */
+ mov %o0, %g3 ! A0 Group
+#endif
+ /* Anything to copy at all? */
+ cmp %o2, 0 ! A1
+ ble,pn %icc, U3copy_from_user_short_ret! BR
+
+ /* Extremely small copy? */
+ cmp %o2, 31 ! A0 Group
+ ble,pn %icc, U3copy_from_user_short ! BR
+
+ /* Large enough to use unrolled prefetch loops? */
+ cmp %o2, 0x100 ! A1
+ bge,a,pt %icc, U3copy_from_user_enter ! BR Group
+ andcc %o0, 0x3f, %g2 ! A0
+
+ ba,pt %xcc, U3copy_from_user_toosmall ! BR Group
+ andcc %o0, 0x7, %g2 ! A0
+
+ .align 32
+U3copy_from_user_short:
+ /* Copy %o2 bytes from src to dst, one byte at a time. */
+ EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
+ add %o1, 0x1, %o1 ! A0
+ add %o0, 0x1, %o0 ! A1
+ subcc %o2, 1, %o2 ! A0 Group
+
+ bg,pt %icc, U3copy_from_user_short ! BR
+ stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
+
+U3copy_from_user_short_ret:
+#ifdef __KERNEL__
+ retl ! BR Group (0-4 cycle stall)
+ clr %o0 ! A0
+#else
+ retl ! BR Group (0-4 cycle stall)
+ mov %g3, %o0 ! A0
+#endif
+
+ /* Here len >= 0x100 and condition codes reflect execution
 * of "andcc %o0, 0x3f, %g2", done by caller.
*/
.align 64
-1:
+U3copy_from_user_enter:
 /* Is 'dst' already aligned on a 64-byte boundary? */
- be,pt %XCC, 2f
+ be,pt %xcc, 2f ! BR
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+ sub %g2, 0x40, %g2 ! A0 Group
+ sub %g0, %g2, %g2 ! A0 Group
+ sub %o2, %g2, %o2 ! A0 Group
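 /* Illustrative C for the three subtractions above (variable
 * names are hypothetical): %g2 ends up holding the distance
 * from 'dst' to the next 64-byte boundary.
 *
 *	unsigned long head = 0x40 - (dst & 0x3f); // == abs((dst & 0x3f) - 0x40)
 *	len -= head;	// pre-subtract; the loop below copies 'head' bytes
 */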
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV_RAW(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
- bg,pt %XCC, 1b
- stb %o3, [%o0 + -1]
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-2: VISEntryHalf
- and %o1, 0x7, %g1
- ba,pt %xcc, 1f
- alignaddr %o1, %g0, %o1
-
- .align 64
-1:
- membar #StoreLoad | #StoreStore | #LoadStore
- prefetcha [%o1 + 0x000] %asi, #one_read
- prefetcha [%o1 + 0x040] %asi, #one_read
- andn %o2, (0x40 - 1), %o4
- prefetcha [%o1 + 0x080] %asi, #one_read
- prefetcha [%o1 + 0x0c0] %asi, #one_read
- EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0)
- prefetcha [%o1 + 0x100] %asi, #one_read
- EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0)
- prefetcha [%o1 + 0x140] %asi, #one_read
- EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0)
- prefetcha [%o1 + 0x180] %asi, #one_read
- faligndata %f0, %f2, %f16
- EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0)
- faligndata %f2, %f4, %f18
- EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0)
- faligndata %f4, %f6, %f20
- EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0)
- faligndata %f6, %f8, %f22
-
- EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0)
- faligndata %f8, %f10, %f24
- EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0)
- faligndata %f10, %f12, %f26
- EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0)
-
- sub %o4, 0x80, %o4
- add %o1, 0x40, %o1
- ba,pt %xcc, 1f
- srl %o4, 6, %o3
+2: VISEntryHalf ! MS+MS
+ and %o1, 0x7, %g1 ! A1
+ ba,pt %xcc, U3copy_from_user_begin ! BR
+ alignaddr %o1, %g0, %o1 ! MS (Break-after)
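 /* The VIS idiom set up above, sketched in C (illustrative only,
 * big-endian byte order; 'align' is the low three bits of the
 * original src that alignaddr latched into %gsr):
 *
 *	uint64_t faligndata_c(uint64_t hi, uint64_t lo, unsigned align)
 *	{
 *		if (align == 0)
 *			return hi;	// src was already 8-byte aligned
 *		return (hi << (8 * align)) | (lo >> (64 - 8 * align));
 *	}
 *
 * Each faligndata below extracts one such 8-byte window from two
 * consecutive aligned doublewords of src.
 */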
.align 64
-1:
- EX3(ldda [%o1 + 0x008] %asi, %f2)
- faligndata %f12, %f14, %f28
- EX3(ldda [%o1 + 0x010] %asi, %f4)
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- EX3(ldda [%o1 + 0x018] %asi, %f6)
- faligndata %f0, %f2, %f16
-
- EX3(ldda [%o1 + 0x020] %asi, %f8)
- faligndata %f2, %f4, %f18
- EX3(ldda [%o1 + 0x028] %asi, %f10)
- faligndata %f4, %f6, %f20
- EX3(ldda [%o1 + 0x030] %asi, %f12)
- faligndata %f6, %f8, %f22
- EX3(ldda [%o1 + 0x038] %asi, %f14)
- faligndata %f8, %f10, %f24
-
- EX3(ldda [%o1 + 0x040] %asi, %f0)
- prefetcha [%o1 + 0x180] %asi, #one_read
- faligndata %f10, %f12, %f26
- subcc %o3, 0x01, %o3
- add %o1, 0x40, %o1
- bg,pt %XCC, 1b
- add %o0, 0x40, %o0
+U3copy_from_user_begin:
+#ifdef __KERNEL__
+ .globl U3copy_from_user_nop_1_6
+U3copy_from_user_nop_1_6:
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
+ sethi %uhi(DCU_PE), %o3
+ sllx %o3, 32, %o3
+ or %g3, %o3, %o3
+ stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
+ membar #Sync
+#endif
+ prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
+ prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
+ andn %o2, (0x40 - 1), %o4 ! A0
+ prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
+ cmp %o4, 0x140 ! A0
+ prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
+ EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
+1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
+ cmp %o4, 0x180 ! A1
+ bge,a,pt %icc, 1f ! BR
+ prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
+1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
+ cmp %o4, 0x1c0 ! A1
+ bge,a,pt %icc, 1f ! BR
+
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
+1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
+ EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
+ faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
+ EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
+ faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
+ EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
+ faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
+
+ EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
+ faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
+ EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
+ faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
+ EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
+
+ /* We only use the first loop if len > (7 * 64). */
+ subcc %o4, 0x1c0, %o4 ! A0 Group17
+ bg,pt %icc, U3copy_from_user_loop1 ! BR
+ add %o1, 0x40, %o1 ! A1
+
+ add %o4, 0x140, %o4 ! A0 Group18
+ ba,pt %xcc, U3copy_from_user_loop2 ! BR
+ srl %o4, 6, %o3 ! A0 Group19
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+
+ /* This loop performs the copy and queues new prefetches.
+ * We drop into the second loop when len <= (5 * 64). Note
+ * that this (5 * 64) factor has been subtracted from len
+ * already.
+ */
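 /* Steady state of the loop below in C-like pseudocode (the helper
 * name is hypothetical).  The 0x1c0 bias applied above keeps seven
 * 64-byte blocks in reserve: one already loaded by the preamble,
 * five drained by loop2, and one by loopfini.
 *
 *	while (biased_len > 0) {	// biased_len = (len & ~0x3f) - 0x1c0
 *		prefetch(src + 0x180);	// stay ~6 cache lines ahead
 *		copy_block_via_fpu(dst, src);	// 64 bytes: ldda/faligndata/stda
 *		src += 0x40; dst += 0x40; biased_len -= 0x40;
 *	}
 */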
+U3copy_from_user_loop1:
+ EX2(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
+ faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
+ EX2(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ EX2(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
+
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+ EX2(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
+ faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
+ EX2(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
+ faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
+ EX2(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
+ faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
+ EX2(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
+
+ faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
+ EX2(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
+ prefetcha [%o1 + 0x180] %asi, #one_read ! MS
+ faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
+ subcc %o4, 0x40, %o4 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_from_user_loop1 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
+
+U3copy_from_user_loop2_enter:
+ mov 5, %o3 ! A1
+
+ /* This loop performs the copy; no new prefetches are
+ * queued. We do things this way so that we do not perform
+ * any spurious prefetches past the end of the src buffer.
+ */
+U3copy_from_user_loop2:
+ EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA Group2
+ EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS
+ faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
+
+ EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group13
+ EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group14
+ EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group15
+ EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group16
+
+ EX3(ldda [%o1 + 0x040] %asi, %f0) ! AX
+ faligndata %f10, %f12, %f26 ! FGA Group17
+ subcc %o3, 0x01, %o3 ! A0
+ add %o1, 0x40, %o1 ! A1
+ bg,pt %xcc, U3copy_from_user_loop2 ! BR
+ add %o0, 0x40, %o0 ! A0 Group18
/* Finally we copy the last full 64-byte block. */
- EX3(ldda [%o1 + 0x008] %asi, %f2)
- faligndata %f12, %f14, %f28
- EX3(ldda [%o1 + 0x010] %asi, %f4)
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- EX3(ldda [%o1 + 0x018] %asi, %f6)
- faligndata %f0, %f2, %f16
- EX3(ldda [%o1 + 0x020] %asi, %f8)
- faligndata %f2, %f4, %f18
- EX3(ldda [%o1 + 0x028] %asi, %f10)
- faligndata %f4, %f6, %f20
- EX3(ldda [%o1 + 0x030] %asi, %f12)
- faligndata %f6, %f8, %f22
- EX3(ldda [%o1 + 0x038] %asi, %f14)
- faligndata %f8, %f10, %f24
- cmp %g1, 0
- be,pt %XCC, 1f
- add %o0, 0x40, %o0
- EX4(ldda [%o1 + 0x040] %asi, %f0)
-1: faligndata %f10, %f12, %f26
- faligndata %f12, %f14, %f28
- faligndata %f14, %f0, %f30
- stda %f16, [%o0] ASI_BLK_P
- add %o0, 0x40, %o0
- add %o1, 0x40, %o1
-
- membar #Sync
+U3copy_from_user_loopfini:
+ EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
+ faligndata %f12, %f14, %f28 ! FGA
+ EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
+ faligndata %f14, %f0, %f30 ! FGA
+ stda %f16, [%o0] ASI_BLK_P ! MS Group20
+ EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
+ faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
+ EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
+ faligndata %f2, %f4, %f18 ! FGA Group12
+ EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
+ faligndata %f4, %f6, %f20 ! FGA Group13
+ EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
+ faligndata %f6, %f8, %f22 ! FGA Group14
+ EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
+ faligndata %f8, %f10, %f24 ! FGA Group15
+ cmp %g1, 0 ! A0
+ be,pt %icc, 1f ! BR
+ add %o0, 0x40, %o0 ! A1
+ EX4(ldda [%o1 + 0x040] %asi, %f0) ! MS
+1: faligndata %f10, %f12, %f26 ! FGA Group16
+ faligndata %f12, %f14, %f28 ! FGA Group17
+ faligndata %f14, %f0, %f30 ! FGA Group18
+ stda %f16, [%o0] ASI_BLK_P ! MS
+ add %o0, 0x40, %o0 ! A0
+ add %o1, 0x40, %o1 ! A1
+#ifdef __KERNEL__
+ .globl U3copy_from_user_nop_2_3
+U3copy_from_user_nop_2_3:
+ mov PRIMARY_CONTEXT, %o3
+ stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
+ stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
+#endif
+ membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer.
+ * load past the end of the src buffer, just like the
+ * similar code in U3copy_from_user_toosmall.
*/
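 /* Decomposition of the tail handled from here on, in illustrative
 * C (names are hypothetical):
 *
 *	tail   = len & 0x3f;	// what remains after the 64-byte blocks
 *	dwords = tail & 0x38;	// moved 8 at a time via faligndata/std
 *	bytes  = tail & 0x07;	// finished one byte at a time at the end
 */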
- and %o2, 0x3f, %o2
- andcc %o2, 0x38, %g2
- be,pn %XCC, 10f
- subcc %g2, 0x8, %g2
- be,pn %XCC, 10f
- cmp %g1, 0
-
- be,a,pt %XCC, 1f
- EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0)
-
-1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0)
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f0, %f2, %f8
- std %f8, [%o0 + 0x00]
- be,pn %XCC, 10f
- add %o0, 0x8, %o0
- EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0)
- add %o1, 0x8, %o1
- sub %o2, 0x8, %o2
- subcc %g2, 0x8, %g2
- faligndata %f2, %f0, %f8
- std %f8, [%o0 + 0x00]
- bne,pn %XCC, 1b
- add %o0, 0x8, %o0
+U3copy_from_user_loopend:
+ and %o2, 0x3f, %o2 ! A0 Group
+ andcc %o2, 0x38, %g2 ! A0 Group
+ be,pn %icc, U3copy_from_user_endcruft ! BR
+ subcc %g2, 0x8, %g2 ! A1
+ be,pn %icc, U3copy_from_user_endcruft ! BR Group
+ cmp %g1, 0 ! A0
+
+ be,a,pt %icc, 1f ! BR Group
+ EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
+
+1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f0, %f2, %f8 ! FGA Group
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ be,pn %icc, U3copy_from_user_endcruft ! BR
+ add %o0, 0x8, %o0 ! A0
+ EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
+ add %o1, 0x8, %o1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA
+ std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A0 Group
/* If anything is left, we copy it one byte at a time.
 * Note that %g1 is (src & 0x7) saved above before the
* alignaddr was performed.
*/
-10:
+U3copy_from_user_endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %XCC, 85f
- sub %o0, %o1, %o3
-
- andcc %g1, 0x7, %g0
- bne,pn %icc, 90f
- andcc %o2, 0x8, %g0
- be,pt %icc, 1f
+ be,pn %icc, U3copy_from_user_short_ret
nop
- EXNV(ldxa [%o1] %asi, %o5, add %o2, %g0)
- stx %o5, [%o1 + %o3]
- add %o1, 0x8, %o1
+ ba,a,pt %xcc, U3copy_from_user_short
-1: andcc %o2, 0x4, %g0
- be,pt %icc, 1f
- nop
- EXNV(lduwa [%o1] %asi, %o5, and %o2, 0x7)
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
+ /* If we get here, then 32 <= len < 0x100 */
+U3copy_from_user_toosmall:
-1: andcc %o2, 0x2, %g0
- be,pt %icc, 1f
- nop
- EXNV(lduha [%o1] %asi, %o5, and %o2, 0x3)
- sth %o5, [%o1 + %o3]
- add %o1, 0x2, %o1
+#ifdef SMALL_COPY_USES_FPU
-1: andcc %o2, 0x1, %g0
- be,pt %icc, 85f
- nop
- EXNV(lduba [%o1] %asi, %o5, and %o2, 0x1)
- ba,pt %xcc, 85f
- stb %o5, [%o1 + %o3]
-
-70: /* 16 < len <= 64 */
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
-
- andn %o2, 0x7, %o4
- and %o2, 0x7, %o2
-1: subcc %o4, 0x8, %o4
- EXNV8(ldxa [%o1] %asi, %o5, add %o2, %o4)
- stx %o5, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
- andcc %o2, 0x4, %g0
- be,pt %XCC, 1f
- nop
- sub %o2, 0x4, %o2
- EXNV4(lduwa [%o1] %asi, %o5, add %o2, %g0)
- stw %o5, [%o1 + %o3]
- add %o1, 0x4, %o1
-1: cmp %o2, 0
- be,pt %XCC, 85f
- nop
- ba,pt %xcc, 90f
- nop
+ /* Is 'dst' already aligned on an 8-byte boundary? */
+ be,pt %xcc, 2f ! BR Group
+
+ /* Compute abs((dst & 7) - 8) into %g2. This is the number
+ * of bytes to copy to make 'dst' 8-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
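 /* Same head-alignment trick as the 64-byte case above, now against
 * an 8-byte boundary (illustrative arithmetic only):
 *
 *	head = 0x8 - (dst & 0x7);	// == abs((dst & 7) - 8)
 *	len -= head;
 */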
+ sub %g2, 0x8, %g2 ! A0
+ sub %g0, %g2, %g2 ! A0 Group (reg-dep)
+ sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
- bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ /* Copy %g2 bytes from src to dst, one byte at a time. */
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
+ add %o1, 0x1, %o1 ! A1
+ add %o0, 0x1, %o0 ! A0 Group
+ subcc %g2, 0x1, %g2 ! A1
-1:
- subcc %o2, 4, %o2
- EXNV(lduwa [%o1] %asi, %g1, add %o2, %g0)
- stw %g1, [%o1 + %o3]
- bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ bg,pt %icc, 1b ! BR Group
+ stb %o3, [%o0 + -1] ! MS Group
-85: retl
- clr %o0
+2: VISEntryHalf ! MS+MS
- .align 32
-90:
- subcc %o2, 1, %o2
- EXNV(lduba [%o1] %asi, %g1, add %o2, %g0)
- stb %g1, [%o1 + %o3]
- bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- clr %o0
+ /* Compute (len - (len % 8)) into %g2. This is guaranteed
+ * to be nonzero.
+ */
+ andn %o2, 0x7, %g2 ! A0 Group
+
+ /* You may read this and believe that it allows reading
+ * one 8-byte longword past the end of src. It actually
+ * does not, as %g2 is subtracted as loads are done from
+ * src, so we always stop before running off the end.
+ * Also, we are guaranteed to have at least 0x10 bytes
+ * to move here.
+ */
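 /* Worked example of the no-overread argument (illustrative values):
 * for len == 0x25, %g2 = (0x25 & ~7) - 8 = 0x18, so aligned loads
 * are issued only at src_aligned + {0x00, 0x08, 0x10, 0x18}; the
 * last load ends at src_aligned + 0x20 <= src + 0x20 < src + len.
 */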
+ sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
+ alignaddr %o1, %g0, %g1 ! MS (Break-after)
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
+ add %g1, 0x8, %g1 ! A0
+
+1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+ subcc %g2, 0x8, %g2 ! A0 Group
+
+ faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+ be,pn %icc, 2f ! BR
+
+ add %o0, 0x8, %o0 ! A1
+ EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
+ add %g1, 0x8, %g1 ! A0
+ sub %o2, 0x8, %o2 ! A1
+
+ subcc %g2, 0x8, %g2 ! A0 Group
+ faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
+ std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
+ add %o1, 0x8, %o1 ! A0
+
+ bne,pn %icc, 1b ! BR
+ add %o0, 0x8, %o0 ! A1
+
+ /* Nothing left to copy? */
+2: cmp %o2, 0 ! A0 Group
+ VISExitHalf ! A0+MS
+ be,pn %icc, U3copy_from_user_short_ret! BR Group
+ nop ! A0
+ ba,a,pt %xcc, U3copy_from_user_short ! BR Group
+
+#else /* !(SMALL_COPY_USES_FPU) */
+
+ xor %o1, %o0, %g2
+ andcc %g2, 0x7, %g0
+ bne,pn %icc, U3copy_from_user_short
+ andcc %o1, 0x7, %g2
+
+ be,pt %xcc, 2f
+ sub %g2, 0x8, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+
+1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
+ bg,pt %icc, 1b
+ stb %o3, [%o0 + -1]
+
+2: andn %o2, 0x7, %g2
+ sub %o2, %g2, %o2
+
+3: EXNV(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
+ subcc %g2, 0x8, %g2
+ bg,pt %icc, 3b
+ stx %o3, [%o0 + -8]
+
+ cmp %o2, 0
+ bne,pn %icc, U3copy_from_user_short
+ nop
+ ba,a,pt %xcc, U3copy_from_user_short_ret
+
+#endif /* !(SMALL_COPY_USES_FPU) */
+#ifdef __KERNEL__
+ .globl U3cfu_fixup
U3cfu_fixup:
/* Since this is copy_from_user(), zero out the rest of the
* kernel buffer.
2: retl
mov %o1, %o0
+#endif