patch-2.3.6 linux/arch/alpha/lib/memcpy.c
- Lines: 155
- Date: Mon Jun 7 11:37:13 1999
- Orig file: v2.3.5/linux/arch/alpha/lib/memcpy.c
- Orig date: Tue Apr 7 08:05:05 1998
diff -u --recursive --new-file v2.3.5/linux/arch/alpha/lib/memcpy.c linux/arch/alpha/lib/memcpy.c
@@ -21,30 +21,44 @@
* This should be done in one go with ldq_u*2/mask/stq_u. Do it
* with a macro so that we can fix it up later..
*/
-#define ALIGN_DEST_TO8(d,s,n) \
+#define ALIGN_DEST_TO8_UP(d,s,n) \
while (d & 7) { \
if (n <= 0) return; \
n--; \
*(char *) d = *(char *) s; \
d++; s++; \
}
+#define ALIGN_DEST_TO8_DN(d,s,n) \
+ while (d & 7) { \
+ if (n <= 0) return; \
+ n--; \
+ d--; s--; \
+ *(char *) d = *(char *) s; \
+ }
/*
* This should similarly be done with ldq_u*2/mask/stq. The destination
* is aligned, but we don't fill in a full quad-word
*/
-#define DO_REST(d,s,n) \
+#define DO_REST_UP(d,s,n) \
while (n > 0) { \
n--; \
*(char *) d = *(char *) s; \
d++; s++; \
}
+#define DO_REST_DN(d,s,n) \
+ while (n > 0) { \
+ n--; \
+ d--; s--; \
+ *(char *) d = *(char *) s; \
+ }
/*
* This should be done with ldq/mask/stq. The source and destination are
* aligned, but we don't fill in a full quad-word
*/
-#define DO_REST_ALIGNED(d,s,n) DO_REST(d,s,n)
+#define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n)
+#define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n)
/*
* This does unaligned memory copies. We want to avoid storing to
@@ -53,9 +67,10 @@
*
* Note the ordering to try to avoid load (and address generation) latencies.
*/
-static inline void __memcpy_unaligned(unsigned long d, unsigned long s, long n)
+static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s,
+ long n)
{
- ALIGN_DEST_TO8(d,s,n);
+ ALIGN_DEST_TO8_UP(d,s,n);
n -= 8; /* to avoid compare against 8 in the loop */
if (n >= 0) {
unsigned long low_word, high_word;
@@ -77,7 +92,17 @@
} while (n >= 0);
}
n += 8;
- DO_REST(d,s,n);
+ DO_REST_UP(d,s,n);
+}
+
+static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s,
+ long n)
+{
+ /* I don't understand AXP assembler well enough for this. -Tim */
+ s += n;
+ d += n;
+ while (n--)
+ * (char *) --d = * (char *) --s;
}
/*
@@ -88,9 +113,10 @@
*
* Note the ordering to try to avoid load (and address generation) latencies.
*/
-static inline void __memcpy_aligned(unsigned long d, unsigned long s, long n)
+static inline void __memcpy_aligned_up (unsigned long d, unsigned long s,
+ long n)
{
- ALIGN_DEST_TO8(d,s,n);
+ ALIGN_DEST_TO8_UP(d,s,n);
n -= 8;
while (n >= 0) {
unsigned long tmp;
@@ -101,18 +127,58 @@
d += 8;
}
n += 8;
- DO_REST_ALIGNED(d,s,n);
+ DO_REST_ALIGNED_UP(d,s,n);
+}
+static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
+ long n)
+{
+ s += n;
+ d += n;
+ ALIGN_DEST_TO8_DN(d,s,n);
+ n -= 8;
+ while (n >= 0) {
+ unsigned long tmp;
+ s -= 8;
+ __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
+ n -= 8;
+ d -= 8;
+ *(unsigned long *) d = tmp;
+ }
+ n += 8;
+ DO_REST_ALIGNED_DN(d,s,n);
}
void * memcpy(void * dest, const void *src, size_t n)
{
if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
- __memcpy_aligned((unsigned long) dest, (unsigned long) src, n);
+ __memcpy_aligned_up ((unsigned long) dest, (unsigned long) src,
+ n);
return dest;
}
- __memcpy_unaligned((unsigned long) dest, (unsigned long) src, n);
+ __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
return dest;
}
/* For backward modules compatibility, define __memcpy. */
asm("__memcpy = memcpy; .globl __memcpy");
+
+void *memmove (void *dest, const void *src, size_t n)
+{
+ if (dest <= src) {
+ if (!(((unsigned long) dest ^ (unsigned long) src) & 7))
+ __memcpy_aligned_up ((unsigned long) dest,
+ (unsigned long) src, n);
+ else
+ __memcpy_unaligned_up ((unsigned long) dest,
+ (unsigned long) src, n);
+ }
+ else {
+ if (!(((unsigned long) dest ^ (unsigned long) src) & 7))
+ __memcpy_aligned_dn ((unsigned long) dest,
+ (unsigned long) src, n);
+ else
+ __memcpy_unaligned_dn ((unsigned long) dest,
+ (unsigned long) src, n);
+ }
+ return dest;
+}
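
A note on the alignment dispatch used above: the test
!(((unsigned long) dest ^ (unsigned long) src) & 7), shared by memcpy()
and the new memmove(), is zero exactly when the two addresses are
congruent modulo 8. XOR cancels whatever bits the addresses have in
common, so the low three bits survive only if the byte offsets within a
quad-word differ; when they agree, one ALIGN_DEST_TO8_UP/_DN pass aligns
source and destination together and the quad-word loop can run. A
minimal user-space sketch of the test (same_alignment is an invented
helper name, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	/* True when p and q share the same offset within an 8-byte
	 * quad-word, so aligning one pointer aligns the other too. */
	static int same_alignment(const void *p, const void *q)
	{
		return !(((uintptr_t) p ^ (uintptr_t) q) & 7);
	}

	int main(void)
	{
		char buf[32];

		assert(same_alignment(buf, buf + 8));      /* equal offsets */
		assert(same_alignment(buf + 3, buf + 11)); /* 3 == 11 mod 8 */
		assert(!same_alignment(buf + 1, buf + 2)); /* 1 != 2 mod 8 */
		return 0;
	}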
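The copy-direction choice in the new memmove() is what makes overlap
safe: an ascending copy (dest <= src) never overwrites source bytes it
has yet to read, and a descending copy has the same property when
dest > src. A hypothetical user-space sketch of that logic, with plain
byte loops standing in for the patch's Alpha fast paths (my_memmove and
the two helpers are invented names):

	#include <stdio.h>
	#include <stddef.h>

	static void copy_up(unsigned char *d, const unsigned char *s, size_t n)
	{
		while (n--)
			*d++ = *s++;	/* forward; safe when dest is below src */
	}

	static void copy_dn(unsigned char *d, const unsigned char *s, size_t n)
	{
		d += n;
		s += n;
		while (n--)
			*--d = *--s;	/* backward; safe when dest is above src */
	}

	static void *my_memmove(void *dest, const void *src, size_t n)
	{
		if (dest <= src)
			copy_up(dest, src, n);
		else
			copy_dn(dest, src, n);
		return dest;
	}

	int main(void)
	{
		char buf[16] = "abcdefgh";

		/* Overlapping shift right: dest > src, descending path. */
		my_memmove(buf + 2, buf, 9);	/* 9 bytes, includes '\0' */
		printf("%s\n", buf + 2);	/* prints abcdefgh */
		return 0;
	}

The patch's __memcpy_unaligned_dn() is exactly this descending byte
loop; as its in-line comment admits, the optimized ldq_u/mask/stq_u
version was left for someone more fluent in Alpha assembly.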