/*
 * x86-64 changes / gcc fixes from Andi Kleen.
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * This hasn't been optimized for the hammer yet, but there are likely
 * no advantages to be gotten from x86-64 here anyway.
 */
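/*
 * One 16-byte, 16-byte-aligned slot; an xmm_save[4] array in the routines
 * below gives the XMMS_SAVE/XMMS_RESTORE macros room to spill and reload
 * %xmm0..%xmm3.
 */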
typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
/* We don't use gcc to save the XMM registers, because there is no easy way
   to tell it to do a clts before the register saving. */
#define XMMS_SAVE do { \
	preempt_disable(); \
	if (!(current_thread_info()->status & TS_USEDFPU)) \
		clts(); \
	__asm__ __volatile__ ( \
		"movups %%xmm0,(%1) ;\n\t" \
		"movups %%xmm1,0x10(%1) ;\n\t" \
		"movups %%xmm2,0x20(%1) ;\n\t" \
		"movups %%xmm3,0x30(%1) ;\n\t" \
		: "=&r" (cr0) \
		: "r" (xmm_save) \
		: "memory"); \
} while(0)
#define XMMS_RESTORE do { \
	__asm__ __volatile__ ( \
		"movups (%1),%%xmm0 ;\n\t" \
		"movups 0x10(%1),%%xmm1 ;\n\t" \
		"movups 0x20(%1),%%xmm2 ;\n\t" \
		"movups 0x30(%1),%%xmm3 ;\n\t" \
		: \
		: "r" (cr0), "r" (xmm_save) \
		: "memory"); \
	if (!(current_thread_info()->status & TS_USEDFPU)) \
		stts(); \
	preempt_enable(); \
} while(0)
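/*
 * Illustrative sketch (not verbatim from this file): each xor_sse_N()
 * below provides the cr0 scratch and XMM save area these macros expect
 * and brackets its inline-asm loop with them, roughly:
 *
 *	unsigned long cr0;
 *	xmm_store_t xmm_save[4];
 *
 *	XMMS_SAVE;
 *	... unrolled SSE xor loop over 'lines' 256-byte blocks ...
 *	XMMS_RESTORE;
 */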
#define OFFS(x) "16*("#x")"
#define PF_OFFS(x) "256+16*("#x")"
#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
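/*
 * How the building blocks above fit together (an illustrative sketch, not
 * the literal loop body): each pass of the "1:" loop in the routines below
 * covers 256 bytes per operand (hence lines = bytes >> 8), moving sixteen
 * 16-byte lanes through %xmm0..%xmm3 while PF0..PF5 prefetch 256 bytes
 * ahead.  One lane of the two-source case expands roughly to:
 *
 *	PF1(0)		prefetchnta 256+16*(0)(%[p2])
 *	LD(0, 0)	movaps 16*(0)(%[p1]), %xmm0
 *	XO1(0, 0)	xorps  16*(0)(%[p2]), %xmm0
 *	ST(0, 0)	movaps %xmm0, 16*(0)(%[p1])
 *
 * i.e. p1[i] ^= p2[i]; XO2..XO4 fold in p3..p5 for the wider variants.
 * The addq/decl tail shown in each routine then advances the pointers by
 * %[inc] bytes and loops until [cnt] reaches zero.
 */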
static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned int lines = bytes >> 8;
	xmm_store_t xmm_save[4];

	__asm__ __volatile__ (
	" addq %[inc], %[p1] ;\n"
	" addq %[inc], %[p2] ;\n"
	" decl %[cnt] ; jnz 1b"
	: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
{
	unsigned int lines = bytes >> 8;
	xmm_store_t xmm_save[4];

	__asm__ __volatile__ (
	" addq %[inc], %[p1] ;\n"
	" addq %[inc], %[p2] ;\n"
	" addq %[inc], %[p3] ;\n"
	" decl %[cnt] ; jnz 1b"
	: [cnt] "+r" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
{
	unsigned int lines = bytes >> 8;
	xmm_store_t xmm_save[4];

	__asm__ __volatile__ (
	" addq %[inc], %[p1] ;\n"
	" addq %[inc], %[p2] ;\n"
	" addq %[inc], %[p3] ;\n"
	" addq %[inc], %[p4] ;\n"
	" decl %[cnt] ; jnz 1b"
	: [cnt] "+c" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned int lines = bytes >> 8;
	xmm_store_t xmm_save[4];

	__asm__ __volatile__ (
	" addq %[inc], %[p1] ;\n"
	" addq %[inc], %[p2] ;\n"
	" addq %[inc], %[p3] ;\n"
	" addq %[inc], %[p4] ;\n"
	" addq %[inc], %[p5] ;\n"
	" decl %[cnt] ; jnz 1b"
	: [cnt] "+c" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
	  [p5] "+r" (p5)
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
	do { \
		xor_speed(&xor_block_sse); \
	} while (0)
/* We force the use of the SSE xor block because it can write around the L2
   cache.  We may also be able to load into only the L1 cache, depending on
   how the CPU deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
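/*
 * Illustrative sketch, not part of the original header: assuming the usual
 * struct xor_block_template layout (do_2..do_5 function pointers) provided
 * by the generic RAID xor code that includes this file, the forced SSE
 * template is invoked roughly as below.  'dest' and 'src' are hypothetical
 * page-sized, 16-byte-aligned buffers.
 */
static inline void example_xor_one_page(unsigned long *dest, unsigned long *src)
{
	/* XOR_SELECT_TEMPLATE() ignores its argument and picks the SSE block */
	struct xor_block_template *t = XOR_SELECT_TEMPLATE(NULL);

	/* dest[i] ^= src[i] over PAGE_SIZE bytes, 256 bytes per loop pass */
	t->do_2(PAGE_SIZE, dest, src);
}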