1 /* U3memcpy.S: UltraSparc-III optimized memcpy.
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
7 #include <asm/visasm.h>
! NOTE(review): this excerpt elides many lines of the original file (the
! embedded decimal prefixes are the original line numbers and show the
! gaps).  Among the missing pieces are the file's #if/#else preprocessor
! structure and the EX_LD/EX_ST wrappers used throughout the function.
!
! ASI_BLK_P: the "block transfer, primary address space" ASI, used by
! the 64-byte stda block stores expanded from STORE_BLK below.
10 #define ASI_BLK_P 0xf0
! First VISEntryHalf/VISExitHalf pair: save %fprs in %o5, enable FP via
! FPRS_FEF, and clear %g1-%g3 and the condition codes on entry; restore
! %fprs from %o5 on exit.
13 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
14 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
15 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
! NOTE(review): a second VISEntryHalf/VISExitHalf pair follows.  In the
! complete file the two pairs are selected by a preprocessor conditional
! that is not visible in this excerpt; as literally shown, these would
! redefine the macros above -- confirm against the full source.
17 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
18 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
! Plain (non-exception-table) expansions: EX_RETVAL is the identity and
! LOAD/STORE/STORE_BLK emit the bare instructions.
31 #define EX_RETVAL(x) x
35 #define LOAD(type,addr,dest) type [addr], dest
39 #define STORE(type,src,addr) type src, [addr]
43 #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
47 #define FUNC_NAME U3memcpy
! Declare %g2/%g3 as scratch so the assembler does not warn about their
! use (SPARC V9 ABI reserves them unless declared).
58 .register %g2,#scratch
59 .register %g3,#scratch
61 /* Special/non-trivial issues of this code:
63 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
64 * 2) Only low 32 FPU registers are used so that only the
65 * lower half of the FPU register set is dirtied by this
66 * code. This is especially important in the kernel.
67 * 3) This code never prefetches cachelines past the end
68 * of the source buffer.
74 /* The cheetah's flexible spine, oversized liver, enlarged heart,
75 * slender muscular body, and claws make it the swiftest hunter
76 * in Africa and the fastest animal on land. Can reach speeds
77 * of up to 2.4GB per second.
81 .type FUNC_NAME,#function
82 FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
! NOTE(review): this excerpt is missing many lines of the function body
! (branch instructions, delay slots, local labels, and the
! VISEntryHalf/VISExitHalf invocations).  The embedded decimal numbers
! are the original file's line numbers and show where the gaps are.
! Do not reorder or "repair" control flow from this view alone --
! consult the complete U3memcpy.S.
96 /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
97 * o5 from here until we hit VISExitHalf.
101 /* Is 'dst' already aligned on an 64-byte boundary? */
105 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
106 * of bytes to copy to make 'dst' 64-byte aligned. We pre-
107 * subtract this from 'len'.
! Byte loop that advances dst up to the next 64-byte boundary.  %g1
! counts bytes down; the store addresses dst as [%o1 + %o4], so %o4 is
! presumably (dst - src) -- TODO confirm against the elided setup code.
117 1: subcc %g1, 0x1, %g1
118 EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
119 EX_ST(STORE(stb, %o3, %o1 + %o4))
! 8-byte copy via the VIS alignaddr/faligndata pair: alignaddr rounds
! %o1 down to an 8-byte boundary and latches the misalignment in %gsr;
! each faligndata then merges two adjacent doublewords into one aligned
! std to dst (%o0).  The two load/merge/store groups ping-pong between
! %f4/%f6 so each iteration reuses the previous iteration's high word.
128 alignaddr %o1, %g0, %o1
130 EX_LD(LOAD(ldd, %o1, %f4))
131 1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
134 faligndata %f4, %f6, %f0
135 EX_ST(STORE(std, %f0, %o0))
139 EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
142 faligndata %f6, %f4, %f2
143 EX_ST(STORE(std, %f2, %o0))
! Main 64-byte block path.  %o4 = len rounded down to a multiple of 64.
! Prefetch up to 0x1c0 ahead with #one_read (streaming, never past the
! end of src per the header comment), then prime %f0-%f14 with the
! first source block and start the faligndata pipeline into %f16-%f30.
147 3: LOAD(prefetch, %o1 + 0x000, #one_read)
148 LOAD(prefetch, %o1 + 0x040, #one_read)
149 andn %o2, (0x40 - 1), %o4
150 LOAD(prefetch, %o1 + 0x080, #one_read)
151 LOAD(prefetch, %o1 + 0x0c0, #one_read)
152 LOAD(prefetch, %o1 + 0x100, #one_read)
153 EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
154 LOAD(prefetch, %o1 + 0x140, #one_read)
155 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
156 LOAD(prefetch, %o1 + 0x180, #one_read)
157 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
158 LOAD(prefetch, %o1 + 0x1c0, #one_read)
159 faligndata %f0, %f2, %f16
160 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
161 faligndata %f2, %f4, %f18
162 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
163 faligndata %f4, %f6, %f20
164 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
165 faligndata %f6, %f8, %f22
167 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
168 faligndata %f8, %f10, %f24
169 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
170 faligndata %f10, %f12, %f26
171 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
! Steady-state loop body (loop branch elided): load the NEXT block's
! doublewords while merging the previous block's, then issue a single
! 64-byte block store (stda ... ASI_BLK_P) of %f16-%f30 to dst.  Only
! %f0-%f31 (the lower FPU half) are used, per the invariant in the file
! header.
182 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
183 faligndata %f12, %f14, %f28
184 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
185 faligndata %f14, %f0, %f30
186 EX_ST(STORE_BLK(%f16, %o0))
187 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
188 faligndata %f0, %f2, %f16
191 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
192 faligndata %f2, %f4, %f18
193 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
194 faligndata %f4, %f6, %f20
195 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
197 faligndata %f6, %f8, %f22
198 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
200 faligndata %f8, %f10, %f24
201 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
202 LOAD(prefetch, %o1 + 0x1c0, #one_read)
203 faligndata %f10, %f12, %f26
207 /* Finally we copy the last full 64-byte block. */
209 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
210 faligndata %f12, %f14, %f28
211 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
212 faligndata %f14, %f0, %f30
213 EX_ST(STORE_BLK(%f16, %o0))
214 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
215 faligndata %f0, %f2, %f16
216 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
217 faligndata %f2, %f4, %f18
218 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
219 faligndata %f4, %f6, %f20
220 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
221 faligndata %f6, %f8, %f22
222 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
223 faligndata %f8, %f10, %f24
227 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
! Drain the pipeline: merge the final doublewords and store the last
! complete 64-byte block.
228 1: faligndata %f10, %f12, %f26
229 faligndata %f12, %f14, %f28
230 faligndata %f14, %f0, %f30
231 EX_ST(STORE_BLK(%f16, %o0))
236 /* Now we copy the (len modulo 64) bytes at the end.
237 * Note how we borrow the %f0 loaded above.
239 * Also notice how this code is careful not to perform a
240 * load past the end of the src buffer.
251 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
253 1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
256 faligndata %f0, %f2, %f8
257 EX_ST(STORE(std, %f8, %o0))
260 EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
263 faligndata %f2, %f0, %f8
264 EX_ST(STORE(std, %f8, %o0))
268 /* If anything is left, we copy it one byte at a time.
269 * Note that %g1 is (src & 0x3) saved above before the
270 * alignaddr was performed.
! Sub-8-byte tail: test each power-of-two size bit of %o2 and copy with
! the widest matching load/store.  Stores again address dst as
! [%o1 + %o3]; %o3 is presumably (dst - src) -- confirm in the elided
! setup code.
284 EX_LD(LOAD(ldx, %o1, %o5))
285 EX_ST(STORE(stx, %o5, %o1 + %o3))
288 1: andcc %o2, 0x4, %g0
291 EX_LD(LOAD(lduw, %o1, %o5))
292 EX_ST(STORE(stw, %o5, %o1 + %o3))
295 1: andcc %o2, 0x2, %g0
298 EX_LD(LOAD(lduh, %o1, %o5))
299 EX_ST(STORE(sth, %o5, %o1 + %o3))
302 1: andcc %o2, 0x1, %g0
305 EX_LD(LOAD(ldub, %o1, %o5))
307 EX_ST(STORE(stb, %o5, %o1 + %o3))
310 70: /* 16 < len <= 64 */
! Medium-size path: copy 16-byte chunks with integer ldx/stx pairs
! (%o4 counts chunk bytes), then fall through to the 8/4/2/1 remainder
! handling below.
317 1: subcc %o4, 0x10, %o4
318 EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
319 EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
320 EX_ST(STORE(stx, %o5, %o1 + %o3))
322 EX_ST(STORE(stx, %g1, %o1 + %o3))
325 73: andcc %o2, 0x8, %g0
329 EX_LD(LOAD(ldx, %o1, %o5))
330 EX_ST(STORE(stx, %o5, %o1 + %o3))
332 1: andcc %o2, 0x4, %g0
336 EX_LD(LOAD(lduw, %o1, %o5))
337 EX_ST(STORE(stw, %o5, %o1 + %o3))
353 EX_LD(LOAD(ldub, %o1, %o5))
354 EX_ST(STORE(stb, %o5, %o1 + %o3))
! Misaligned doubleword path: shift/merge pairs of 8-byte source words
! (%g2/%g3) into aligned stx stores.  The sllx/srlx merge instructions
! between these lines are not visible in this excerpt.
370 EX_LD(LOAD(ldx, %o1, %g2))
374 1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
379 EX_ST(STORE(stx, %o5, %o0))
392 80: /* 0 < len <= 16 */
! Small-copy path: word-at-a-time, dst addressed as [%o1 + %o3].
399 EX_LD(LOAD(lduw, %o1, %g1))
400 EX_ST(STORE(stw, %g1, %o1 + %o3))
! Return value: %g5 presumably holds the original dst pointer, saved by
! the elided prologue -- TODO confirm against the full source.
405 mov EX_RETVAL(%g5), %o0
410 EX_LD(LOAD(ldub, %o1, %g1))
411 EX_ST(STORE(stb, %g1, %o1 + %o3))
415 mov EX_RETVAL(%g5), %o0
417 .size FUNC_NAME, .-FUNC_NAME