/* Optimized memcpy implementation for PowerPC A2.
   Copyright (C) 2010 Free Software Foundation, Inc.
   Contributed by Michael Brutman <brutman@us.ibm.com>.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
26
#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
29
/* NOTE(review): optimized memcpy for the 32-bit PowerPC A2 core.
   Strategy visible below: align the destination to a quadword, load
   the runtime __cache_line_size (64- vs 128-byte line variants),
   align to a cache-line boundary, then copy whole lines with dcbt
   source prefetch (PREFETCH_AHEAD lines) and a dcbz-distance of
   ZERO_AHEAD lines, finishing with quadword/byte tail copies.

   NOTE(review): this text appears to be a damaged extraction.  The
   bare decimal lines interleaved below look like original-source line
   numbers fused into the text, and jumps in that numbering
   (e.g. 242 -> 269, 428 -> 471) indicate runs of instructions were
   dropped.  Labels referenced but not defined in what is visible:
   L(cachelineset), L(big_lines), L(nocacheprefetch),
   L(aligntocacheline), L(prefetchSRC_128), L(copy_remaining).
   All original tokens are preserved byte-for-byte; restore the body
   from the upstream glibc file before assembling.  */
EALIGN (BP_SYM (memcpy), 5, 0)
32
dcbt 0,r4 /* Prefetch ONE SRC cacheline */
33
cmplwi cr1,r5,16 /* is size < 16 ? */
34
mr r6,r3 /* Copy dest reg to r6; */
38
/* Big copy (16 bytes or more)
40
Figure out how far to the nearest quadword boundary, or if we are
43
r3 - return value (always)
44
r4 - current source addr
46
r6 - current dest addr
49
neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
50
clrlwi r8,r8,32-4 /* align to 16byte boundary */
51
sub r7,r4,r3 /* compute offset to src from dest */
52
cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
57
/* Destination is not aligned on quadword boundary. Get us to one.
59
r3 - return value (always)
60
r4 - current source addr
62
r6 - current dest addr
63
r7 - offset to src from dest
64
r8 - number of bytes to quadword boundary
67
mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
68
subf r5,r8,r5 /* adjust remaining len */
71
lbzx r0,r7,r6 /* copy 1 byte addr */
76
lhzx r0,r7,r6 /* copy 2 byte addr */
81
lwzx r0,r7,r6 /* copy 4 byte addr */
86
lfdx r0,r7,r6 /* copy 8 byte addr */
90
add r4,r7,r6 /* update src addr */
94
/* Dest is quadword aligned now.
96
Lots of decisions to make. If we are copying less than a cache
97
line we won't be here long. If we are not on a cache line
98
boundary we need to get there. And then we need to figure out
99
how many cache lines ahead to pre-touch.
101
r3 - return value (always)
102
r4 - current source addr
104
r6 - current dest addr
114
/* Establishes GOT addressability so we can load __cache_line_size
115
from static. This value was set from the aux vector during startup. */
119
addis r9,r9,__cache_line_size-1b@ha
120
lwz r9,__cache_line_size-1b@l(r9)
123
/* Load __cache_line_size from static. This value was set from the
124
aux vector during startup. */
125
lis r9,__cache_line_size@ha
126
lwz r9,__cache_line_size@l(r9)
130
bne+ cr5,L(cachelineset)
139
cmpw cr5,r5,r10 /* Less than a cacheline to go? */
141
neg r7,r6 /* How far to next cacheline bdy? */
143
addi r6,r6,-8 /* prepare for stdu */
145
addi r4,r4,-8 /* prepare for ldu */
148
ble+ cr5,L(lessthancacheline)
150
beq- cr0,L(big_lines) /* 128 byte line code */
155
/* More than a cacheline left to go, and using 64 byte cachelines */
157
clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
159
cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
161
/* Reduce total len by what it takes to get to the next cache line */
163
srwi r7,r7,4 /* How many qws to get to the line bdy? */
165
/* How many full cache lines to copy after getting to a line bdy? */
168
cmplwi r10,0 /* If no full cache lines to copy ... */
169
li r11,0 /* number cachelines to copy with prefetch */
170
beq L(nocacheprefetch)
173
/* We are here because we have at least one full cache line to copy,
174
and therefore some pre-touching to do. */
176
cmplwi r10,PREFETCH_AHEAD
177
li r12,64+8 /* prefetch distance */
178
ble L(lessthanmaxprefetch)
180
/* We can only do so much pre-fetching. R11 will have the count of
181
lines left to prefetch after the initial batch of prefetches
184
subi r11,r10,PREFETCH_AHEAD
185
li r10,PREFETCH_AHEAD
187
L(lessthanmaxprefetch):
190
/* At this point r10/ctr hold the number of lines to prefetch in this
191
initial batch, and r11 holds any remainder. */
199
/* Prefetching is done, or was not needed.
201
cr6 - are we on a cacheline boundary already?
202
r7 - number of quadwords to the next cacheline boundary
208
cmplwi cr1,r5,64 /* Less than a cache line to copy? */
210
/* How many bytes are left after we copy whatever full
211
cache lines we can get? */
214
beq cr6,L(cachelinealigned)
217
/* Copy quadwords up to the next cacheline boundary */
224
bdnz L(aligntocacheline)
228
L(cachelinealigned): /* copy while cache lines */
230
blt- cr1,L(lessthancacheline) /* size <64 */
237
li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
240
/* Copy whole cachelines, optimized by prefetching SRC cacheline */
241
L(loop): /* Copy aligned body */
242
dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
/* NOTE(review): numbering jumps 242 -> 269 here; the 64-byte-line
   copy loop body (loads/stores and dcbz) appears to have been
   dropped by the extraction.  */
269
L(loop2): /* Copy aligned body */
/* NOTE(review): numbering jumps 269 -> 292; the no-prefetch copy
   loop body appears to be missing as well.  */
292
L(lessthancacheline): /* Was there less than cache to do ? */
294
srwi r7,r5,4 /* divide size by 16 */
303
bdnz L(copy_remaining)
305
L(do_lt16): /* less than 16 ? */
306
cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
307
beqlr+ /* no rest to copy */
311
L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
315
lfdx fp9,r7,r6 /* copy 8 byte */
320
lwzx r0,r7,r6 /* copy 4 byte */
325
lhzx r0,r7,r6 /* copy 2 byte */
330
lbzx r0,r7,r6 /* copy 1 byte */
339
/* Similar to above, but for use with 128 byte lines. */
344
clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
346
cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
348
/* Reduce total len by what it takes to get to the next cache line */
350
srwi r7,r7,4 /* How many qw to get to the line bdy? */
352
/* How many full cache lines to copy after getting to a line bdy? */
355
cmplwi r10,0 /* If no full cache lines to copy ... */
356
li r11,0 /* number cachelines to copy with prefetch */
357
beq L(nocacheprefetch_128)
360
/* We are here because we have at least one full cache line to copy,
361
and therefore some pre-touching to do. */
363
cmplwi r10,PREFETCH_AHEAD
364
li r12,128+8 /* prefetch distance */
365
ble L(lessthanmaxprefetch_128)
367
/* We can only do so much pre-fetching. R11 will have the count of
368
lines left to prefetch after the initial batch of prefetches
371
subi r11,r10,PREFETCH_AHEAD
372
li r10,PREFETCH_AHEAD
374
L(lessthanmaxprefetch_128):
377
/* At this point r10/ctr hold the number of lines to prefetch in this
378
initial batch, and r11 holds any remainder. */
383
bdnz L(prefetchSRC_128)
386
/* Prefetching is done, or was not needed.
388
cr6 - are we on a cacheline boundary already?
389
r7 - number of quadwords to the next cacheline boundary
392
L(nocacheprefetch_128):
395
cmplwi cr1,r5,128 /* Less than a cache line to copy? */
397
/* How many bytes are left after we copy whatever full
398
cache lines we can get? */
401
beq cr6,L(cachelinealigned_128)
404
/* Copy quadwords up to the next cacheline boundary */
406
L(aligntocacheline_128):
411
bdnz L(aligntocacheline_128)
414
L(cachelinealigned_128): /* copy while cache lines */
416
blt- cr1,L(lessthancacheline) /* size <128 */
423
li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
426
/* Copy whole cachelines, optimized by prefetching SRC cacheline */
427
L(loop_128): /* Copy aligned body */
428
dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
/* NOTE(review): numbering jumps 428 -> 471 and 471 -> 507; the
   128-byte-line copy loop bodies appear to have been dropped.  */
471
L(loop2_128): /* Copy aligned body */
507
b L(lessthancacheline)
510
END (BP_SYM (memcpy))
511
libc_hidden_builtin_def (memcpy)