author     Johannes Carlsson <johannes.carlsson.x@sonyericsson.com>  2011-02-03 15:16:15 +0100
committer  Johan Redestig <johan.redestig@sonyericsson.com>  2011-02-03 15:17:13 +0100
commit     0f67de14e605f519d74ed6fff67c6712158459d3 (patch)
tree       a68c0e239c76d60c6d5dc6f193a485b04553cb35 /libc/string/memmove.c
parent     9a3305128920e0ff018d267d1bf4f5e58a5146e5 (diff)
Use a more optimized version of memmove
On ARM there is currently no assembler-optimized memmove in libc. There is, however, a more optimized bcopy, which copies longs instead of bytes where possible. This almost doubles performance in the best case.

Change-Id: I1f1cd27529443358047c385730deaf938ce4e642
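For illustration, the speedup comes from moving one long per iteration instead of one byte when the source and destination are mutually aligned. The following is a minimal sketch of that idea in plain C; it is not bionic's actual bcopy, and the helper name copy_backward_sketch is illustrative:

#include <stddef.h>

/* Illustrative sketch only (hypothetical helper, not bionic's bcopy):
 * an overlap-safe backward copy that moves one long per iteration when
 * both pointers are mutually long-aligned, falling back to bytes. */
static void copy_backward_sketch(void *dst, const void *src, size_t n)
{
    unsigned char *d = (unsigned char *)dst + n;
    const unsigned char *s = (const unsigned char *)src + n;

    /* Word path: only when both end pointers are long-aligned, so every
     * access below stays aligned as we step back one long at a time. */
    if ((((size_t)d | (size_t)s) & (sizeof(long) - 1)) == 0) {
        while (n >= sizeof(long)) {
            d -= sizeof(long);
            s -= sizeof(long);
            *(long *)(void *)d = *(const long *)(const void *)s;
            n -= sizeof(long);
        }
    }
    while (n--)          /* byte tail, or the unaligned fallback */
        *--d = *--s;
}

On 32-bit ARM one word access replaces four byte accesses, which cuts the load/store count per byte copied and is consistent with the best-case doubling quoted above.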
Diffstat (limited to 'libc/string/memmove.c')
-rw-r--r--  libc/string/memmove.c  37
1 file changed, 3 insertions(+), 34 deletions(-)
diff --git a/libc/string/memmove.c b/libc/string/memmove.c
index 98ecfc90b..072104b6c 100644
--- a/libc/string/memmove.c
+++ b/libc/string/memmove.c
@@ -26,6 +26,7 @@
* SUCH DAMAGE.
*/
#include <string.h>
+#include <strings.h>

void *memmove(void *dst, const void *src, size_t n)
{
@@ -37,39 +38,7 @@ void *memmove(void *dst, const void *src, size_t n)
if (__builtin_expect((q < p) || ((size_t)(q - p) >= n), 1)) {
return memcpy(dst, src, n);
} else {
-#define PRELOAD_DISTANCE 64
- /* a semi-optimized memmove(). we're preloading the src and dst buffers
- * as we go */
- size_t c0, c1, i;
- p += n;
- q += n;
- /* note: we preload the destination as well because the 1-byte-at-a-time
- * copy below doesn't take advantage of the write-buffer; we need
- * to use the cache instead as a poor man's write-combiner */
- __builtin_prefetch(p-1);
- __builtin_prefetch(q-1);
- if (PRELOAD_DISTANCE > 32) {
- __builtin_prefetch(p-(32+1));
- __builtin_prefetch(q-(32+1));
- }
- /* do the prefetch as soon as possible and prevent the compiler from
- * reordering the instructions above the prefetch */
- asm volatile("":::"memory");
- c0 = n & 0x1F; /* cache-line is 32 bytes */
- c1 = n >> 5;
- while ( c1-- ) {
- /* ARMv6 can have up to 3 memory accesses outstanding */
- __builtin_prefetch(p - (PRELOAD_DISTANCE+1));
- __builtin_prefetch(q - (PRELOAD_DISTANCE+1));
- asm volatile("":::"memory");
- for (i=0 ; i<32 ; i++) {
- *--q = *--p;
- }
- }
- while ( c0-- ) {
- *--q = *--p;
- }
+ bcopy(src, dst, n);
+ return dst;
}
-
- return dst;
}
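As a usage note, the reason memmove cannot always defer to memcpy is overlap: when dst lies inside [src, src+n) the copy must run backward, which is exactly the case the bcopy call now handles. A small self-contained check of that contract (standard C, not part of this change):

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[16] = "abcdefghijklmno";

    /* dst overlaps src with dst > src, so the backward path is taken;
     * a naive forward copy would clobber bytes before reading them. */
    memmove(buf + 2, buf, 8);
    assert(memcmp(buf + 2, "abcdefgh", 8) == 0);

    puts("overlapping memmove OK");
    return 0;
}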