diff options
author | Carmelo Amoroso <carmelo.amoroso@st.com> | 2008-11-17 17:56:41 +0000 |
---|---|---|
committer | Carmelo Amoroso <carmelo.amoroso@st.com> | 2008-11-17 17:56:41 +0000 |
commit | 644c442da3f0cea2a2aa591f57726e3e6de823e2 (patch) | |
tree | f8d49598d16d350059059ce23b758da4afde7b15 /libc/string/powerpc/memcpy.c | |
parent | e0c796f1422f3617302e4ecafef6d1f461499eba (diff) | |
download | uClibc-alpine-644c442da3f0cea2a2aa591f57726e3e6de823e2.tar.bz2 uClibc-alpine-644c442da3f0cea2a2aa591f57726e3e6de823e2.tar.xz |
Synch with trunk @ 24075.
Step 4
libc/string and asm implementation
Diffstat (limited to 'libc/string/powerpc/memcpy.c')
-rw-r--r-- | libc/string/powerpc/memcpy.c | 30 |
1 file changed, 16 insertions, 14 deletions
diff --git a/libc/string/powerpc/memcpy.c b/libc/string/powerpc/memcpy.c index dd39e6f9d..22794ec33 100644 --- a/libc/string/powerpc/memcpy.c +++ b/libc/string/powerpc/memcpy.c @@ -7,7 +7,7 @@ /* These are carefully optimized mem*() functions for PPC written in C. * Don't muck around with these function without checking the generated - * assmbler code. + * assembler code. * It is possible to optimize these significantly more by using specific * data cache instructions(mainly dcbz). However that requires knownledge * about the CPU's cache line size. @@ -21,16 +21,15 @@ #include <string.h> -/* Experimentally off - libc_hidden_proto(memcpy) */ -void *memcpy(void *to, const void *from, size_t n) -/* PPC can do pre increment and load/store, but not post increment and load/store. - Therefore use *++ptr instead of *ptr++. */ +/* PPC can do pre increment and load/store, but not post increment and + load/store. Therefore use *++ptr instead of *ptr++. */ +void *memcpy(void *to, const void *from, size_t len) { unsigned long rem, chunks, tmp1, tmp2; unsigned char *tmp_to; unsigned char *tmp_from = (unsigned char *)from; - chunks = n / 8; + chunks = len / 8; tmp_from -= 4; tmp_to = to - 4; if (!chunks) @@ -49,30 +48,33 @@ void *memcpy(void *to, const void *from, size_t n) *(unsigned long *)tmp_to = tmp2; } while (--chunks); lessthan8: - n = n % 8; - if (n >= 4) { - *(unsigned long *)(tmp_to+4) = *(unsigned long *)(tmp_from+4); + len = len % 8; + if (len >= 4) { tmp_from += 4; tmp_to += 4; - n = n-4; + *(unsigned long *)(tmp_to) = *(unsigned long *)(tmp_from); + len -= 4; } - if (!n ) return to; + if (!len) + return to; tmp_from += 3; tmp_to += 3; do { *++tmp_to = *++tmp_from; - } while (--n); + } while (--len); return to; align: + /* ???: Do we really need to generate the carry flag here? 
If not, then: + rem -= 4; */ rem = 4 - rem; - n = n - rem; + len -= rem; do { *(tmp_to+4) = *(tmp_from+4); ++tmp_from; ++tmp_to; } while (--rem); - chunks = n / 8; + chunks = len / 8; if (chunks) goto copy_chunks; goto lessthan8; |