/* Optimized mempcpy implementation for POWER7.
   Copyright (C) 2010 Free Software Foundation, Inc.
   Contributed by Luis Machado <luisgpm@br.ibm.com>.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
   02110-1301 USA.  */

#include <sysdep.h>
#include <bp-sym.h>
#include <bp-asm.h>
/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
   Returns 'dst' + 'len'.  */
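/* For reference, __mempcpy is semantically equivalent to the following
   C sketch (a minimal model of the interface, not of this optimized
   implementation; memcpy is from <string.h>):

     void *
     __mempcpy (void *dst, const void *src, size_t len)
     {
       memcpy (dst, src, len);
       return (char *) dst + len;
     }
*/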
	.machine  power7
EALIGN (BP_SYM (__mempcpy), 5, 0)
	CALL_MCOUNT 3

	cmpldi	cr1,5,31
	neg	0,3
	std	3,-16(1)
	std	31,-8(1)
	cfi_offset(31,-8)

	ble	cr1,L(copy_LT_32)  /* If move < 32 bytes use short move
				      code.  */
	andi.	11,3,7	      /* Check alignment of DST.  */
	clrldi	10,4,61	      /* Check alignment of SRC.  */
	cmpld	cr6,10,11     /* SRC and DST alignments match?  */
	mr	12,4	      /* Keep a copy of SRC in r12.  */
	mr	31,5	      /* Keep a copy of LEN in r31.  */
	bne	cr6,L(copy_GE_32_unaligned)

	srdi	9,5,3	      /* Number of full doublewords remaining.  */

	beq	L(copy_GE_32_aligned_cont)
58
/* Get the SRC aligned to 8 bytes. */
76
clrldi 10,12,61 /* Check alignment of SRC again. */
77
srdi 9,31,3 /* Number of full doublewords remaining. */
79
L(copy_GE_32_aligned_cont):
89
/* Copy 1~3 doublewords so the main loop starts
90
at a multiple of 32 bytes. */
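	/* Illustrative arithmetic (not from the original source): with
	   LEN = 72 there are nine doublewords to move; peeling
	   9 mod 4 = 1 doubleword here leaves eight, i.e. exactly two
	   32-byte iterations of the main loop below.  */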
1:	/* Copy 1 doubleword and set the counter.  */

	/* Main aligned copy loop.  Copies 32 bytes at a time.  */
	/* Check for tail bytes.  */

	/* At this point we have a tail of 0~7 bytes and we know that the
	   destination is doubleword-aligned.  */
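	/* The 4:/2:/1: ladder below dispatches on the low bits of the
	   remaining length, so any tail is handled by at most one 4-byte,
	   one 2-byte and one 1-byte copy (e.g. a 7-byte tail decomposes
	   as 4 + 2 + 1).  */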
4:	/* Copy 4 bytes.  */

2:	/* Copy 2 bytes.  */

1:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
	/* Handle copies of 0~31 bytes.  */
	.align	4
L(copy_LT_32):

	/* At least 9 bytes to go.  */

	beq	L(copy_LT_32_aligned)

	/* Force 4-byte alignment for SRC.  */
1:	bf	31,L(end_4bytes_alignment)

L(end_4bytes_alignment):

L(copy_LT_32_aligned):
	/* At least 6 bytes to go, and SRC is word-aligned.  */
223
8: /* Copy 8 bytes. */
232
4: /* Copy 4 bytes. */
239
2: /* Copy 2-3 bytes. */
252
1: /* Copy 1 byte. */
257
0: /* Return DST + LEN pointer. */
	/* Handles copies of 0~8 bytes.  */

	/* Though we could have used ld/std here, they are still
	   slow for unaligned cases.  */

	ld	3,-16(1)      /* Return DST + LEN pointer.  */
	add	3,3,5
	blr

4:	/* Copies 4~7 bytes.  */

5:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
	   SRC is not.  Use aligned quadword loads from SRC, shifted to realign
	   the data, allowing for aligned DST stores.  */
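	/* Illustrative C sketch of the realignment idea above (a byte-wise
	   model of the technique, not of the exact instructions below):
	   read two adjacent aligned 16-byte blocks and concatenate the
	   bytes that straddle them.

	     // hypothetical helper; 'lo'/'hi' are the two aligned blocks
	     // and SRC is misaligned by 'off' bytes (0 < off < 16)
	     void merge (unsigned char *dst, const unsigned char *lo,
			 const unsigned char *hi, unsigned off)
	     {
	       for (unsigned i = 0; i < 16; i++)
		 dst[i] = (i + off < 16) ? lo[i + off] : hi[i + off - 16];
	     }

	   On POWER7 a single vperm performs this merge, with the byte
	   selection pattern computed once from the misalignment.  */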
	.align	4
L(copy_GE_32_unaligned):
	clrldi	0,0,60	      /* Number of bytes until the 1st
				 quadword.  */
	andi.	11,3,15	      /* Check alignment of DST (against
				 quadwords).  */
	srdi	9,5,4	      /* Number of full quadwords remaining.  */

	beq	L(copy_GE_32_unaligned_cont)
	/* DST is not quadword aligned; get it aligned.  */

	/* Vector instructions work best when proper alignment (16 bytes)
	   is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
1:	/* Copy 1 byte.  */

2:	/* Copy 2 bytes.  */

4:	/* Copy 4 bytes.  */

8:	/* Copy 8 bytes.  */

	clrldi	10,12,60      /* Check alignment of SRC.  */
	srdi	9,31,4	      /* Number of full quadwords remaining.  */

	/* The proper alignment is present; it is OK to copy the bytes now.  */
L(copy_GE_32_unaligned_cont):

	/* Setup two indexes to speed up the indexed vector operations.  */
	li	6,16	      /* Index for 16-byte offsets.  */
	li	7,32	      /* Index for 32-byte offsets.  */
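	/* The indexed form lvx vrT,rA,rB loads the aligned quadword
	   containing rA+rB, so with r6 = 16 and r7 = 32 the copy loop
	   below can reach the next two quadwords without extra pointer
	   arithmetic inside the loop body.  */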
	srdi	8,31,5	      /* Setup the loop counter.  */
	lvsl	5,0,12	      /* Permute control vector from the SRC
				 misalignment.  */
	lvx	3,0,12	      /* First aligned quadword at or below SRC.  */
	bf	31,L(setup_unaligned_loop)

	/* Copy another 16 bytes to align to 32 bytes due to the loop.  */

L(setup_unaligned_loop):
	mtctr	8	      /* Load the loop counter into CTR.  */
	ble	cr6,L(end_unaligned_loop)

	/* Copy 32 bytes at a time using vector instructions.  */
	.align	4
L(unaligned_loop):
	/* Note: vr6/vr10 may contain data that was already copied,
	   but in order to get proper alignment, we may have to copy
	   some portions again.  This is still faster than using
	   unaligned vector instructions.  */

	lvx	4,11,6	      /* vr4 = r11+16.  */
	vperm	6,3,4,5       /* Merge the correctly-aligned portions
				 of vr3/vr4 into vr6.  */
	lvx	3,11,7	      /* vr3 = r11+32.  */
	vperm	10,4,3,5      /* Merge the correctly-aligned portions
				 of vr4/vr3 into vr10.  */
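	/* vr5 holds the permute control computed above from the SRC
	   misalignment (lvsl), so each vperm selects the 16 consecutive
	   source bytes that straddle two aligned quadwords; vr3 carries
	   the upper quadword over into the next iteration.  */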
	stvx	6,0,10	      /* Store 16 bytes at r10.  */
	stvx	10,10,6       /* Store 16 bytes at r10+16.  */
	addi	11,11,32
	addi	10,10,32
	bdnz	L(unaligned_loop)
L(end_unaligned_loop):

	/* Check for tail bytes.  */

	/* We have 1~15 tail bytes to copy, and DST is quadword aligned.  */
8:	/* Copy 8 bytes.  */

4:	/* Copy 4 bytes.  */

2:	/* Copy 2~3 bytes.  */

1:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
END_GEN_TB (BP_SYM (__mempcpy),TB_TOCLESS)
libc_hidden_def (BP_SYM (__mempcpy))
weak_alias (BP_SYM (__mempcpy), BP_SYM (mempcpy))
libc_hidden_builtin_def (mempcpy)