processor's decoders, but it's not always possible.
#ifdef HAVE_SSE /* Only P3 (may be Cyrix3) */
  if(((unsigned long)from) & 15)
  /* if SRC is misaligned */
    __asm__ __volatile__ (
    "movups (%0), %%xmm0\n"
    "movups 16(%0), %%xmm1\n"
    "movups 32(%0), %%xmm2\n"
    "movups 48(%0), %%xmm3\n"
    "movntps %%xmm0, (%1)\n"
    "movntps %%xmm1, 16(%1)\n"
    "movntps %%xmm2, 32(%1)\n"
    "movntps %%xmm3, 48(%1)\n"
    :: "r" (from), "r" (to) : "memory");
    from = (const void *) (((const unsigned char *)from)+64);
    to = (void *) (((unsigned char *)to)+64);
  else
  /*
     Only if SRC is aligned on a 16-byte boundary.
     This allows movaps to be used instead of movups; movaps requires the data
     to be aligned, otherwise a general-protection exception (#GP) is generated.
  */
    __asm__ __volatile__ (
    "movaps (%0), %%xmm0\n"
    "movaps 16(%0), %%xmm1\n"
    "movaps 32(%0), %%xmm2\n"
    "movaps 48(%0), %%xmm3\n"
    "movntps %%xmm0, (%1)\n"
    "movntps %%xmm1, 16(%1)\n"
    "movntps %%xmm2, 32(%1)\n"
    "movntps %%xmm3, 48(%1)\n"
    :: "r" (from), "r" (to) : "memory");
    from = (const void *) (((const unsigned char *)from)+64);
    to = (void *) (((unsigned char *)to)+64);
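For readers who prefer intrinsics, here is a minimal sketch of the same idea (not the original code): copy 64 bytes per iteration with non-temporal stores, choosing the aligned (movaps) or unaligned (movups) load form based on the source pointer. The function name stream_copy64 and its preconditions (dst 16-byte aligned, n a multiple of 64) are assumptions made only for this sketch.

    #include <stddef.h>
    #include <stdint.h>
    #include <xmmintrin.h>

    static void stream_copy64(void *dst, const void *src, size_t n)
    {
        float *d = (float *)dst;
        const float *s = (const float *)src;
        int src_aligned = (((uintptr_t)src & 15) == 0);
        size_t i;

        for (i = 0; i < n; i += 64, s += 16, d += 16) {
            __m128 r0, r1, r2, r3;
            if (src_aligned) {               /* movaps path */
                r0 = _mm_load_ps(s + 0);
                r1 = _mm_load_ps(s + 4);
                r2 = _mm_load_ps(s + 8);
                r3 = _mm_load_ps(s + 12);
            } else {                         /* movups path */
                r0 = _mm_loadu_ps(s + 0);
                r1 = _mm_loadu_ps(s + 4);
                r2 = _mm_loadu_ps(s + 8);
                r3 = _mm_loadu_ps(s + 12);
            }
            _mm_stream_ps(d + 0,  r0);       /* movntps: store bypassing the cache */
            _mm_stream_ps(d + 4,  r1);
            _mm_stream_ps(d + 8,  r2);
            _mm_stream_ps(d + 12, r3);
        }
        _mm_sfence();                        /* order the non-temporal stores */
    }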
#else
  /* Align destination at BLOCK_SIZE boundary */
  for(; ((uintptr_t)to & (BLOCK_SIZE-1)) && i>0; i--)
    __asm__ __volatile__ (
#ifndef HAVE_MMX1
    PREFETCH" 320(%0)\n"  /* prefetch ahead; not available on plain MMX */
#endif
    "movq (%0), %%mm0\n"
    "movq 8(%0), %%mm1\n"
    "movq 16(%0), %%mm2\n"
    "movq 24(%0), %%mm3\n"
    "movq 32(%0), %%mm4\n"
    "movq 40(%0), %%mm5\n"
    "movq 48(%0), %%mm6\n"
    "movq 56(%0), %%mm7\n"
    MOVNTQ" %%mm0, (%1)\n"
    MOVNTQ" %%mm1, 8(%1)\n"
    MOVNTQ" %%mm2, 16(%1)\n"
    MOVNTQ" %%mm3, 24(%1)\n"
    MOVNTQ" %%mm4, 32(%1)\n"
    MOVNTQ" %%mm5, 40(%1)\n"
    MOVNTQ" %%mm6, 48(%1)\n"
    MOVNTQ" %%mm7, 56(%1)\n"
    :: "r" (from), "r" (to) : "memory");
    from = (const void *) (((const unsigned char *)from)+64);
    to = (void *) (((unsigned char *)to)+64);
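This head loop copies plain 64-byte chunks until the destination pointer reaches a BLOCK_SIZE boundary, so the block-prefetch loop below always operates on whole, destination-aligned blocks. A rough C sketch of the same step follows; copy64 is a hypothetical 64-byte copy primitive and the helper name is invented for illustration.

    #include <stddef.h>
    #include <stdint.h>

    void copy64(void *dst, const void *src);   /* hypothetical 64-byte copy primitive */

    /* Sketch: copy 64-byte chunks until dst hits a block_size boundary.
     * chunks is the number of 64-byte chunks still to copy; returns how
     * many of them this head loop consumed. */
    static size_t align_dst_head(unsigned char **dst, const unsigned char **src,
                                 size_t chunks, size_t block_size)
    {
        size_t used = 0;
        while (((uintptr_t)*dst & (block_size - 1)) != 0 && used < chunks) {
            copy64(*dst, *src);
            *dst += 64;
            *src += 64;
            used++;
        }
        return used;
    }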
  /* printf(" %p %p\n", (uintptr_t)from&1023, (uintptr_t)to&1023); */
  /* Pure Assembly cuz gcc is a bit unpredictable ;) */
    __asm__ __volatile__ (
    /* 1st pass: read one word every 32 bytes to pull the next
     * BLOCK_SIZE bytes (%3) of the source into the cache */
    "xorl %%eax, %%eax \n\t"
    "1: \n\t"
    "movl (%0, %%eax), %%ebx \n\t"
    "movl 32(%0, %%eax), %%ebx \n\t"
    "movl 64(%0, %%eax), %%ebx \n\t"
    "movl 96(%0, %%eax), %%ebx \n\t"
    "addl $128, %%eax \n\t"
    "cmpl %3, %%eax \n\t"
    " jb 1b \n\t"

    /* 2nd pass: copy the now-cached block with non-temporal 64-byte stores */
    "xorl %%eax, %%eax \n\t"
    "2: \n\t"
    "movq (%0, %%eax), %%mm0\n"
    "movq 8(%0, %%eax), %%mm1\n"
    "movq 16(%0, %%eax), %%mm2\n"
    "movq 24(%0, %%eax), %%mm3\n"
    "movq 32(%0, %%eax), %%mm4\n"
    "movq 40(%0, %%eax), %%mm5\n"
    "movq 48(%0, %%eax), %%mm6\n"
    "movq 56(%0, %%eax), %%mm7\n"
    MOVNTQ" %%mm0, (%1, %%eax)\n"
    MOVNTQ" %%mm1, 8(%1, %%eax)\n"
    MOVNTQ" %%mm2, 16(%1, %%eax)\n"
    MOVNTQ" %%mm3, 24(%1, %%eax)\n"
    MOVNTQ" %%mm4, 32(%1, %%eax)\n"
    MOVNTQ" %%mm5, 40(%1, %%eax)\n"
    MOVNTQ" %%mm6, 48(%1, %%eax)\n"
    MOVNTQ" %%mm7, 56(%1, %%eax)\n"
    "addl $64, %%eax \n\t"
    "cmpl %3, %%eax \n\t"
    " jb 2b \n\t"
#if CONFUSION_FACTOR > 0
    /* a few percent speedup on out of order executing CPUs */
    "movl %5, %%eax \n\t"
    "movl (%0), %%ebx \n\t"
    "movl (%0), %%ebx \n\t"
    "movl (%0), %%ebx \n\t"
    "movl (%0), %%ebx \n\t"
#endif

    "xorl %%eax, %%eax \n\t"
    : "+r" (from), "+r" (to), "+r" (i)
    : "r" (BLOCK_SIZE), "i" (BLOCK_SIZE/64), "i" (CONFUSION_FACTOR)
    : "%eax", "%ebx");
__asm__ __volatile__ (
#ifndef HAVE_MMX1
    PREFETCH" 320(%0)\n"  /* prefetch ahead; not available on plain MMX */
#endif
    "movq (%0), %%mm0\n"
    "movq 8(%0), %%mm1\n"
    "movq 16(%0), %%mm2\n"
    "movq 24(%0), %%mm3\n"
    "movq 32(%0), %%mm4\n"
    "movq 40(%0), %%mm5\n"
    "movq 48(%0), %%mm6\n"
    "movq 56(%0), %%mm7\n"
    MOVNTQ" %%mm0, (%1)\n"
    MOVNTQ" %%mm1, 8(%1)\n"
    MOVNTQ" %%mm2, 16(%1)\n"
    MOVNTQ" %%mm3, 24(%1)\n"
    MOVNTQ" %%mm4, 32(%1)\n"
    MOVNTQ" %%mm5, 40(%1)\n"
    MOVNTQ" %%mm6, 48(%1)\n"
    MOVNTQ" %%mm7, 56(%1)\n"
    :: "r" (from), "r" (to) : "memory");
    from = (const void *) (((const unsigned char *)from)+64);
    to = (void *) (((unsigned char *)to)+64);
#endif /* Have SSE */

    /* since movntq is weakly-ordered, an "sfence"
     * is needed to make the stores ordered again. */
    __asm__ __volatile__ ("sfence":::"memory");
    /* enable FPU use again */
    __asm__ __volatile__ (EMMS:::"memory");
  /*
   * Now do the tail of the block
   */
  if(len) small_memcpy(to, from, len);
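Here len holds whatever is left after all complete 64-byte chunks have been copied (at most 63 bytes). small_memcpy is presumably a short helper defined elsewhere in the file; functionally, the tail step amounts to something like the sketch below, with a byte loop used purely for illustration.

    #include <stddef.h>

    /* Illustrative equivalent of the tail copy (not the actual small_memcpy). */
    static void tail_copy(void *to, const void *from, size_t len)
    {
        unsigned char *d = (unsigned char *)to;
        const unsigned char *s = (const unsigned char *)from;
        while (len--)
            *d++ = *s++;
    }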