/*
 * linux/arch/alpha/lib/memcpy.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This is a reasonably optimized memcpy() routine.
 */

/*
 * Note that the C code is written to be optimized into good assembly. However,
 * at this point gcc is unable to sanely compile "if (n >= 0)", resulting in an
 * explicit compare against 0 (instead of just using the proper "blt reg, xx" or
 * "bge reg, xx"). I hope alpha-gcc will be fixed to notice this eventually..
 */

#include <linux/types.h>
/*
 * This should be done in one go with ldq_u*2/mask/stq_u. Do it
 * with a macro so that we can fix it up later..
 */
#define ALIGN_DEST_TO8_UP(d,s,n) \
	while (d & 7) { \
		if (n <= 0) return; \
		n--; \
		*(char *) d = *(char *) s; \
		d++; s++; \
	}
#define ALIGN_DEST_TO8_DN(d,s,n) \
	while (d & 7) { \
		if (n <= 0) return; \
		n--; \
		d--; s--; \
		*(char *) d = *(char *) s; \
	}
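/*
 * For now the alignment macros above simply copy one byte at a time until
 * the destination is 8-byte aligned, returning early from the caller when
 * n runs out.  The ldq_u*2/mask/stq_u sequence suggested above would build
 * the partial quad-word in registers and store it in one go; that is left
 * as a possible optimization and is not implemented here.
 */
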
/*
 * This should similarly be done with ldq_u*2/mask/stq. The destination
 * is aligned, but we don't fill in a full quad-word
 */
#define DO_REST_UP(d,s,n) \
	while (n > 0) { \
		n--; \
		*(char *) d = *(char *) s; \
		d++; s++; \
	}
#define DO_REST_DN(d,s,n) \
	while (n > 0) { \
		n--; \
		d--; s--; \
		*(char *) d = *(char *) s; \
	}
/*
 * This should be done with ldq/mask/stq. The source and destination are
 * aligned, but we don't fill in a full quad-word
 */
#define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n)
#define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n)
/*
 * This does unaligned memory copies. We want to avoid storing to
 * an unaligned address, as that would do a read-modify-write cycle.
 * We also want to avoid double-reading the unaligned reads.
 *
 * Note the ordering to try to avoid load (and address generation) latencies.
 */
static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s,
					  long n)
{
	ALIGN_DEST_TO8_UP(d,s,n);
	n -= 8;			/* to avoid compare against 8 in the loop */
	if (n >= 0) {
		unsigned long low_word, high_word;
		__asm__("ldq_u %0,%1":"=r" (low_word):"m" (*(unsigned long *) s));
		do {
			unsigned long tmp;
			__asm__("ldq_u %0,%1":"=r" (high_word):"m" (*(unsigned long *)(s+8)));
			n -= 8;
			__asm__("extql %1,%2,%0"
				:"=r" (low_word)
				:"r" (low_word), "r" (s));
			__asm__("extqh %1,%2,%0"
				:"=r" (tmp)
				:"r" (high_word), "r" (s));
			s += 8;
			*(unsigned long *) d = low_word | tmp;
			d += 8;
			low_word = high_word;
		} while (n >= 0);
	}
	n += 8;
	DO_REST_UP(d,s,n);
}
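/*
 * A worked example of the quad-word merge above, assuming a source byte
 * offset of 3 (s & 7 == 3): the first ldq_u fetches the aligned quad-word
 * holding bytes s-3..s+4, the second fetches bytes s+5..s+12.
 * "extql low_word,s" shifts the first right by 3 bytes, putting bytes
 * s..s+4 into byte positions 0..4; "extqh high_word,s" shifts the second
 * left, putting bytes s+5..s+7 into positions 5..7.  OR-ing the two gives
 * the unaligned quad-word starting at s, which is then written with a
 * plain aligned store to d.
 */
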
static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s,
					  long n)
{
	/* I don't understand AXP assembler well enough for this. -Tim */
	s += n;
	d += n;
	while (n--)
		* (char *) --d = * (char *) --s;
}
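/*
 * The *_dn variants copy downwards, starting at the end of both buffers -
 * the order an overlapping copy with the destination above the source
 * would need.  Note that memcpy() below only ever uses the *_up variants.
 */
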
/*
 * Hmm.. Strange. The __asm__ here is there to make gcc use an integer register
 * for the load-store. I don't know why, but it would seem that using a floating
 * point register for the move seems to slow things down (very small difference,
 * but measurable).
 *
 * Note the ordering to try to avoid load (and address generation) latencies.
 */
static inline void __memcpy_aligned_up (unsigned long d, unsigned long s,
					long n)
{
	ALIGN_DEST_TO8_UP(d,s,n);
	n -= 8;
	while (n >= 0) {
		unsigned long tmp;
		__asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
		n -= 8;
		s += 8;
		*(unsigned long *) d = tmp;
		d += 8;
	}
	n += 8;
	DO_REST_ALIGNED_UP(d,s,n);
}
static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
					long n)
{
	s += n;
	d += n;
	ALIGN_DEST_TO8_DN(d,s,n);
	n -= 8;
	while (n >= 0) {
		unsigned long tmp;
		s -= 8;
		__asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
		n -= 8;
		d -= 8;
		*(unsigned long *) d = tmp;
	}
	n += 8;
	DO_REST_ALIGNED_DN(d,s,n);
}
void * memcpy(void * dest, const void *src, size_t n)
{
	if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
		__memcpy_aligned_up ((unsigned long) dest, (unsigned long) src,
				     n);
		return dest;
	}
	__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
	return dest;
}
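/*
 * The "(dest ^ src) & 7" test checks whether the two pointers share the
 * same offset within a quad-word: for example dest = 0x10003 and
 * src = 0x20003 differ only above bit 2, so the XOR of their low three
 * bits is zero, and once ALIGN_DEST_TO8_UP has copied the first five
 * bytes both pointers are 8-byte aligned and the ldq/stq path applies.
 */
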
/* For backward modules compatibility, define __memcpy. */
asm("__memcpy = memcpy; .globl __memcpy");