1 /* U3memcpy.S: UltraSparc-III optimized memcpy.
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
7 #include <asm/visasm.h>
10 #include <asm/spitfire.h>
12 #define ASI_BLK_P 0xf0 /* "block primary" ASI: 64-byte block load/store, used by stda below */
14 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs /* save caller's %fprs in %o5, enable FPU (FEF); %o5 must survive to VISExitHalf */
15 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs /* restore caller's FEF state saved by VISEntryHalf */
22 .register %g2,#scratch /* declare %g2/%g3 as scratch so the assembler allows their use */
23 .register %g3,#scratch
25 /* Special/non-trivial issues of this code:
27 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
28 * 2) Only low 32 FPU registers are used so that only the
29 * lower half of the FPU register set is dirtied by this
30 * code. This is especially important in the kernel.
31 * 3) This code never prefetches cachelines past the end
32 * of the source buffer.
38 /* The cheetah's flexible spine, oversized liver, enlarged heart,
39 * slender muscular body, and claws make it the swiftest hunter
40 * in Africa and the fastest animal on land. Can reach speeds
41 * of up to 2.4GB per second.
45 U3memcpy: /* %o0=dst, %o1=src, %o2=len */

61 /* Here len >= 256 and condition codes reflect execution
62 * of "andcc %o0, 0x7, %g2", done by caller.

66 /* Is 'dst' already aligned on a 64-byte boundary? */

69 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
70 * of bytes to copy to make 'dst' 64-byte aligned. We pre-
71 * subtract this from 'len'.

77 /* Copy %g2 bytes from src to dst, one byte at a time. */
78 1: ldub [%o1 + 0x00], %o3
/* Set GSR.align from the low bits of %o1 and round %o1 down to an
 * 8-byte boundary; the faligndata instructions below use that saved
 * offset to extract aligned doublewords from the misaligned source.
 */
89 alignaddr %o1, %g0, %o1
/* Order the byte stores above against the block transfer below. */
93 membar #StoreLoad | #StoreStore | #LoadStore
94 prefetch [%o1 + 0x000], #one_read
95 prefetch [%o1 + 0x040], #one_read
96 andn %o2, (0x40 - 1), %o4 /* %o4 = len rounded down to a multiple of 64 */
97 prefetch [%o1 + 0x080], #one_read
98 prefetch [%o1 + 0x0c0], #one_read
/* Prime the software pipeline: load the first 64 bytes into %f0-%f14
 * and start realigning them into the output block %f16-%f30, while
 * prefetching further ahead in the source stream.
 */
99 ldd [%o1 + 0x000], %f0
100 prefetch [%o1 + 0x100], #one_read
101 ldd [%o1 + 0x008], %f2
102 prefetch [%o1 + 0x140], #one_read
103 ldd [%o1 + 0x010], %f4
104 prefetch [%o1 + 0x180], #one_read
105 faligndata %f0, %f2, %f16
106 ldd [%o1 + 0x018], %f6
107 faligndata %f2, %f4, %f18
108 ldd [%o1 + 0x020], %f8
109 faligndata %f4, %f6, %f20
110 ldd [%o1 + 0x028], %f10
111 faligndata %f6, %f8, %f22

113 ldd [%o1 + 0x030], %f12
114 faligndata %f8, %f10, %f24
115 ldd [%o1 + 0x038], %f14
116 faligndata %f10, %f12, %f26
117 ldd [%o1 + 0x040], %f0 /* first doubleword of the NEXT 64-byte block */

/* Steady-state loop body: finish realigning the previous 64 bytes,
 * block-store them via stda/ASI_BLK_P, and overlap the loads of the
 * next 64 bytes plus a prefetch 0x180 ahead of %o1.
 */
126 ldd [%o1 + 0x008], %f2
127 faligndata %f12, %f14, %f28
128 ldd [%o1 + 0x010], %f4
129 faligndata %f14, %f0, %f30
130 stda %f16, [%o0] ASI_BLK_P /* 64-byte block store of %f16-%f30 */
131 ldd [%o1 + 0x018], %f6
132 faligndata %f0, %f2, %f16

134 ldd [%o1 + 0x020], %f8
135 faligndata %f2, %f4, %f18
136 ldd [%o1 + 0x028], %f10
137 faligndata %f4, %f6, %f20
138 ldd [%o1 + 0x030], %f12
139 faligndata %f6, %f8, %f22
140 ldd [%o1 + 0x038], %f14
141 faligndata %f8, %f10, %f24

143 ldd [%o1 + 0x040], %f0
144 prefetch [%o1 + 0x180], #one_read
145 faligndata %f10, %f12, %f26

151 /* Finally we copy the last full 64-byte block. */
152 ldd [%o1 + 0x008], %f2
153 faligndata %f12, %f14, %f28
154 ldd [%o1 + 0x010], %f4
155 faligndata %f14, %f0, %f30
156 stda %f16, [%o0] ASI_BLK_P
157 ldd [%o1 + 0x018], %f6
158 faligndata %f0, %f2, %f16
159 ldd [%o1 + 0x020], %f8
160 faligndata %f2, %f4, %f18
161 ldd [%o1 + 0x028], %f10
162 faligndata %f4, %f6, %f20
163 ldd [%o1 + 0x030], %f12
164 faligndata %f6, %f8, %f22
165 ldd [%o1 + 0x038], %f14
166 faligndata %f8, %f10, %f24

170 ldd [%o1 + 0x040], %f0
/* Drain the pipeline and store the final full 64-byte block. */
171 1: faligndata %f10, %f12, %f26
172 faligndata %f12, %f14, %f28
173 faligndata %f14, %f0, %f30
174 stda %f16, [%o0] ASI_BLK_P

179 /* Now we copy the (len modulo 64) bytes at the end.
180 * Note how we borrow the %f0 loaded above.
182 * Also notice how this code is careful not to perform a
183 * load past the end of the src buffer.

193 ldd [%o1 + 0x00], %f0
/* 8 bytes at a time: realign each adjacent doubleword pair and store,
 * alternating which of %f0/%f2 holds the trailing doubleword.
 */
195 1: ldd [%o1 + 0x08], %f2

199 faligndata %f0, %f2, %f8
200 std %f8, [%o0 + 0x00]

203 ldd [%o1 + 0x08], %f0

207 faligndata %f2, %f0, %f8
208 std %f8, [%o0 + 0x00]

212 /* If anything is left, we copy it one byte at a time.
213 * Note that %g1 is (src & 0x3) saved above before the
214 * alignaddr was performed.

/* Sub-8-byte tail: peel off the 4-, 2- and 1-byte remainders in turn. */
232 1: andcc %o2, 0x4, %g0

239 1: andcc %o2, 0x2, %g0

246 1: andcc %o2, 0x1, %g0

253 70: /* 16 < len <= 64 */
/* Medium-copy path: 8 bytes per iteration with integer loads/stores. */
259 1: subcc %o4, 0x8, %o4

277 80: /* 0 < len <= 16 */
302 /* Act like copy_{to,in}_user(), i.e. return zero instead
303 * of original destination pointer. This is invoked when
304 * copy_{to,in}_user() finds that %asi is kernel space.
306 .globl U3memcpy_user_stub
/* Pop the register window and return 0 in the caller's %o0. */
314 restore %g0, %g0, %o0