(memcpy): Optimize better for copies smaller than 256 bytes. Also, use only one unrolled loop instead of two for the large copy case.

Ulrich Drepper 2004-07-31 17:14:51 +00:00
parent 80574c92d7
commit 3ee3a00222
1 changed file with 228 additions and 333 deletions
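
In outline, the rewrite replaces the old 31-byte/256-byte split with three explicit paths: small_copy for len <= 16, medium_copy for len < 256, and the VIS block-copy entered at enter/begin for everything larger, with alignment folded into the dispatch by or-ing dst, src and len together and testing the low bits once. Below is a C sketch of that dispatch only; it is illustrative, not glibc code: the function name is made up, and the loop bodies are naive stand-ins for the hand-scheduled SPARCv9/VIS assembly in the diff.

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative sketch of the new size dispatch; thresholds (16, 256)
	 * and alignment tests mirror the "218:" entry sequence in the diff. */
	void *memcpy_dispatch_sketch(void *dstp, const void *srcp, size_t len)
	{
		unsigned char *d = dstp;
		const unsigned char *s = srcp;
		uintptr_t ptr_bits = (uintptr_t)d | (uintptr_t)s;

		if (len == 0)				/* cmp %o2, 0; be,pn out */
			return dstp;
		if (len <= 16) {			/* bleu,a small_copy */
			if (((ptr_bits | len) & 0x3) == 0)	/* andcc %o3, 0x3 */
				for (; len != 0; len -= 4, d += 4, s += 4)
					*(uint32_t *)d = *(const uint32_t *)s;
			else				/* small_copy_unaligned */
				for (; len != 0; len--)
					*d++ = *s++;
		} else if (len < 256) {			/* blu medium_copy */
			if ((ptr_bits & 0x7) != 0) {	/* andcc %o3, 0x7 */
				for (; len != 0; len--)
					*d++ = *s++;
			} else {			/* medium_copy_aligned */
				for (; len >= 8; len -= 8, d += 8, s += 8)
					*(uint64_t *)d = *(const uint64_t *)s;
				for (; len != 0; len--)
					*d++ = *s++;
			}
		} else {				/* ba,pt enter */
			/* Byte-copy until dst is 64-byte aligned, stream whole
			 * 64-byte blocks (faligndata + stda in the real code),
			 * then mop up the len % 64 remainder.  */
			for (; ((uintptr_t)d & 0x3f) != 0; len--)
				*d++ = *s++;
			for (; len >= 64; len -= 64)
				for (int i = 0; i < 64; i++)
					*d++ = *s++;
			for (; len != 0; len--)
				*d++ = *s++;
		}
		return dstp;
	}

Note how folding len into the or'ed value for the small path means a single andcc both proves word alignment of the pointers and guarantees the word loop needs no byte tail.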

@@ -20,32 +20,36 @@
    02111-1307 USA.  */
 #include <sysdep.h>
 #define ASI_BLK_P 0xf0
 #define FPRS_FEF  0x04
 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-#define SMALL_COPY_USES_FPU
 #ifndef XCC
 #define USE_BPR
 #define XCC xcc
 #endif
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+	.register	%g6,#scratch
 	.text
 	.align	32
 ENTRY(bcopy)
-	sub		%o1, %o0, %o4		/* IEU0 Group */
-	mov		%o0, %g3		/* IEU1 */
-	cmp		%o4, %o2		/* IEU1 Group */
-	mov		%o1, %o0		/* IEU0 */
-	bgeu,pt		%XCC, 100f		/* CTI */
-	 mov		%g3, %o1		/* IEU0 Group */
+	sub		%o1, %o0, %o4
+	mov		%o0, %g4
+	cmp		%o4, %o2
+	mov		%o1, %o0
+	bgeu,pt		%XCC, 100f
+	 mov		%g4, %o1
 #ifndef USE_BPR
-	srl		%o2, 0, %o2		/* IEU1 */
+	srl		%o2, 0, %o2
 #endif
-	brnz,pn		%o2, 220f		/* CTI Group */
-	 add		%o0, %o2, %o0		/* IEU0 */
+	brnz,pn		%o2, 220f
+	 add		%o0, %o2, %o0
 	retl
 	 nop
 END(bcopy)
@@ -68,373 +72,264 @@ END(bcopy)
 ENTRY(memcpy)
 100:	/* %o0=dst, %o1=src, %o2=len */
-#ifndef __KERNEL__
-	/* Save away original 'dst' for memcpy return value. */
-	mov		%o0, %g3			! A0 Group
-#endif
-	/* Anything to copy at all? */
-	cmp		%o2, 0				! A1
-	ble,pn		%XCC, 102f			! BR
-	/* Extremely small copy? */
-218:	 cmp		%o2, 31				! A0 Group
-	ble,pn		%XCC, 101f			! BR
-	/* Large enough to use unrolled prefetch loops? */
-	cmp		%o2, 0x100			! A1
-	bge,a,pt	%XCC, 103f			! BR Group
-	 andcc		%o0, 0x3f, %g2			! A0
-	ba,pt		%XCC, 108f			! BR Group
-	 andcc		%o0, 0x7, %g2			! A0
-	.align		32
-101:
-	/* Copy %o2 bytes from src to dst, one byte at a time. */
-	ldub		[%o1 + 0x00], %o3		! MS Group
-	add		%o1, 0x1, %o1			! A0
-	add		%o0, 0x1, %o0			! A1
-	subcc		%o2, 1, %o2			! A0 Group
-	bg,pt		%XCC, 101b			! BR
-	 stb		%o3, [%o0 + -1]			! MS Group (1-cycle stall)
-102:
-#ifdef __KERNEL__
-	retl						! BR Group (0-4 cycle stall)
-	 clr		%o0				! A0
-#else
-	retl						! BR Group (0-4 cycle stall)
-	 mov		%g3, %o0			! A0
-#endif
-	/* Here len >= (6 * 64) and condition codes reflect execution
+	mov		%o0, %g5
+	cmp		%o2, 0
+	be,pn		%XCC, out
+218:	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	bleu,a,pn	%XCC, small_copy
+	 or		%o3, %o2, %o3
+	cmp		%o2, 256
+	blu,pt		%XCC, medium_copy
+	 andcc		%o3, 0x7, %g0
+	ba,pt		%xcc, enter
+	 andcc		%o0, 0x3f, %g2
+	/* Here len >= 256 and condition codes reflect execution
 	 * of "andcc %o0, 0x7, %g2", done by caller.
 	 */
 	.align		64
-103:
+enter:
 	/* Is 'dst' already aligned on an 64-byte boundary? */
-	be,pt		%XCC, 2f			! BR
+	be,pt		%XCC, 2f
 	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
 	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
 	 * subtract this from 'len'.
 	 */
-	 sub		%g2, 0x40, %g2			! A0 Group
-	sub		%g0, %g2, %g2			! A0 Group
-	sub		%o2, %g2, %o2			! A0 Group
+	 sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
 	/* Copy %g2 bytes from src to dst, one byte at a time. */
-1:	ldub		[%o1 + 0x00], %o3		! MS (Group)
-	add		%o1, 0x1, %o1			! A1
-	add		%o0, 0x1, %o0			! A0 Group
-	subcc		%g2, 0x1, %g2			! A1
-	bg,pt		%XCC, 1b			! BR Group
-	 stb		%o3, [%o0 + -1]			! MS Group
-2:	VISEntryHalf					! MS+MS
-	and		%o1, 0x7, %g1			! A1
-	ba,pt		%XCC, 104f			! BR
-	 alignaddr	%o1, %g0, %o1			! MS (Break-after)
+1:	ldub		[%o1 + 0x00], %o3
+	add		%o1, 0x1, %o1
+	add		%o0, 0x1, %o0
+	subcc		%g2, 0x1, %g2
+	bg,pt		%XCC, 1b
+	 stb		%o3, [%o0 + -1]
+2:	VISEntryHalf
+	and		%o1, 0x7, %g1
+	ba,pt		%xcc, begin
+	 alignaddr	%o1, %g0, %o1
 	.align		64
-104:
-	prefetch	[%o1 + 0x000], #one_read	! MS Group1
-	prefetch	[%o1 + 0x040], #one_read	! MS Group2
-	andn		%o2, (0x40 - 1), %o4		! A0
-	prefetch	[%o1 + 0x080], #one_read	! MS Group3
-	cmp		%o4, 0x140			! A0
-	prefetch	[%o1 + 0x0c0], #one_read	! MS Group4
-	ldd		[%o1 + 0x000], %f0		! MS Group5 (%f0 results at G8)
-	bge,a,pt	%XCC, 1f			! BR
-	 prefetch	[%o1 + 0x100], #one_read	! MS Group6
-1:	ldd		[%o1 + 0x008], %f2		! AX (%f2 results at G9)
-	cmp		%o4, 0x180			! A1
-	bge,a,pt	%XCC, 1f			! BR
-	 prefetch	[%o1 + 0x140], #one_read	! MS Group7
-1:	ldd		[%o1 + 0x010], %f4		! AX (%f4 results at G10)
-	cmp		%o4, 0x1c0			! A1
-	bge,a,pt	%XCC, 1f			! BR
-	 prefetch	[%o1 + 0x180], #one_read	! MS Group8
-1:	faligndata	%f0, %f2, %f16			! FGA Group9 (%f16 at G12)
-	ldd		[%o1 + 0x018], %f6		! AX (%f6 results at G12)
-	faligndata	%f2, %f4, %f18			! FGA Group10 (%f18 results at G13)
-	ldd		[%o1 + 0x020], %f8		! MS (%f8 results at G13)
-	faligndata	%f4, %f6, %f20			! FGA Group12 (1-cycle stall,%f20 at G15)
-	ldd		[%o1 + 0x028], %f10		! MS (%f10 results at G15)
-	faligndata	%f6, %f8, %f22			! FGA Group13 (%f22 results at G16)
-	ldd		[%o1 + 0x030], %f12		! MS (%f12 results at G16)
-	faligndata	%f8, %f10, %f24			! FGA Group15 (1-cycle stall,%f24 at G18)
-	ldd		[%o1 + 0x038], %f14		! MS (%f14 results at G18)
-	faligndata	%f10, %f12, %f26		! FGA Group16 (%f26 results at G19)
-	ldd		[%o1 + 0x040], %f0		! MS (%f0 results at G19)
-	/* We only use the first loop if len > (7 * 64). */
-	subcc		%o4, 0x1c0, %o4			! A0 Group17
-	bg,pt		%XCC, 105f			! BR
-	 add		%o1, 0x40, %o1			! A1
-	add		%o4, 0x140, %o4			! A0 Group18
-	ba,pt		%XCC, 106f			! BR
-	 srl		%o4, 6, %o3			! A0 Group19
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	/* This loop performs the copy and queues new prefetches.
-	 * We drop into the second loop when len <= (5 * 64).  Note
-	 * that this (5 * 64) factor has been subtracted from len
-	 * already.
-	 */
-105:
-	ldd		[%o1 + 0x008], %f2		! MS Group2 (%f2 results at G5)
-	faligndata	%f12, %f14, %f28		! FGA (%f28 results at G5)
-	ldd		[%o1 + 0x010], %f4		! MS Group3 (%f4 results at G6)
-	faligndata	%f14, %f0, %f30			! FGA Group4 (1-cycle stall, %f30 at G7)
-	stda		%f16, [%o0] ASI_BLK_P		! MS
-	ldd		[%o1 + 0x018], %f6		! AX (%f6 results at G7)
-	faligndata	%f0, %f2, %f16			! FGA Group12 (7-cycle stall)
-	ldd		[%o1 + 0x020], %f8		! MS (%f8 results at G15)
-	faligndata	%f2, %f4, %f18			! FGA Group13 (%f18 results at G16)
-	ldd		[%o1 + 0x028], %f10		! MS (%f10 results at G16)
-	faligndata	%f4, %f6, %f20			! FGA Group14 (%f20 results at G17)
-	ldd		[%o1 + 0x030], %f12		! MS (%f12 results at G17)
-	faligndata	%f6, %f8, %f22			! FGA Group15 (%f22 results at G18)
-	ldd		[%o1 + 0x038], %f14		! MS (%f14 results at G18)
-	faligndata	%f8, %f10, %f24			! FGA Group16 (%f24 results at G19)
-	ldd		[%o1 + 0x040], %f0		! AX (%f0 results at G19)
-	prefetch	[%o1 + 0x180], #one_read	! MS
-	faligndata	%f10, %f12, %f26		! FGA Group17 (%f26 results at G20)
-	subcc		%o4, 0x40, %o4			! A0
-	add		%o1, 0x40, %o1			! A1
-	bg,pt		%XCC, 105b			! BR
-	 add		%o0, 0x40, %o0			! A0 Group18
-	mov		5, %o3				! A1
-	/* This loop performs on the copy, no new prefetches are
-	 * queued.  We do things this way so that we do not perform
-	 * any spurious prefetches past the end of the src buffer.
-	 */
-106:
-	ldd		[%o1 + 0x008], %f2		! MS
-	faligndata	%f12, %f14, %f28		! FGA Group2
-	ldd		[%o1 + 0x010], %f4		! MS
-	faligndata	%f14, %f0, %f30			! FGA Group4 (1-cycle stall)
-	stda		%f16, [%o0] ASI_BLK_P		! MS
-	ldd		[%o1 + 0x018], %f6		! AX
-	faligndata	%f0, %f2, %f16			! FGA Group12 (7-cycle stall)
-	ldd		[%o1 + 0x020], %f8		! MS
-	faligndata	%f2, %f4, %f18			! FGA Group13
-	ldd		[%o1 + 0x028], %f10		! MS
-	faligndata	%f4, %f6, %f20			! FGA Group14
-	ldd		[%o1 + 0x030], %f12		! MS
-	faligndata	%f6, %f8, %f22			! FGA Group15
-	ldd		[%o1 + 0x038], %f14		! MS
-	faligndata	%f8, %f10, %f24			! FGA Group16
-	ldd		[%o1 + 0x040], %f0		! AX
-	faligndata	%f10, %f12, %f26		! FGA Group17
-	subcc		%o3, 0x01, %o3			! A0
-	add		%o1, 0x40, %o1			! A1
-	bg,pt		%XCC, 106b			! BR
-	 add		%o0, 0x40, %o0			! A0 Group18
+begin:
+	prefetch	[%o1 + 0x000], #one_read
+	prefetch	[%o1 + 0x040], #one_read
+	andn		%o2, (0x40 - 1), %o4
+	prefetch	[%o1 + 0x080], #one_read
+	prefetch	[%o1 + 0x0c0], #one_read
+	ldd		[%o1 + 0x000], %f0
+	prefetch	[%o1 + 0x100], #one_read
+	ldd		[%o1 + 0x008], %f2
+	prefetch	[%o1 + 0x140], #one_read
+	ldd		[%o1 + 0x010], %f4
+	prefetch	[%o1 + 0x180], #one_read
+	faligndata	%f0, %f2, %f16
+	ldd		[%o1 + 0x018], %f6
+	faligndata	%f2, %f4, %f18
+	ldd		[%o1 + 0x020], %f8
+	faligndata	%f4, %f6, %f20
+	ldd		[%o1 + 0x028], %f10
+	faligndata	%f6, %f8, %f22
+	ldd		[%o1 + 0x030], %f12
+	faligndata	%f8, %f10, %f24
+	ldd		[%o1 + 0x038], %f14
+	faligndata	%f10, %f12, %f26
+	ldd		[%o1 + 0x040], %f0
+	sub		%o4, 0x80, %o4
+	add		%o1, 0x40, %o1
+	ba,pt		%xcc, loop
+	 srl		%o4, 6, %o3
+	.align		64
+loop:
+	ldd		[%o1 + 0x008], %f2
+	faligndata	%f12, %f14, %f28
+	ldd		[%o1 + 0x010], %f4
+	faligndata	%f14, %f0, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x018], %f6
+	faligndata	%f0, %f2, %f16
+	ldd		[%o1 + 0x020], %f8
+	faligndata	%f2, %f4, %f18
+	ldd		[%o1 + 0x028], %f10
+	faligndata	%f4, %f6, %f20
+	ldd		[%o1 + 0x030], %f12
+	faligndata	%f6, %f8, %f22
+	ldd		[%o1 + 0x038], %f14
+	faligndata	%f8, %f10, %f24
+	ldd		[%o1 + 0x040], %f0
+	prefetch	[%o1 + 0x180], #one_read
+	faligndata	%f10, %f12, %f26
+	subcc		%o3, 0x01, %o3
+	add		%o1, 0x40, %o1
+	bg,pt		%XCC, loop
+	 add		%o0, 0x40, %o0
 	/* Finally we copy the last full 64-byte block. */
-	ldd		[%o1 + 0x008], %f2		! MS
-	faligndata	%f12, %f14, %f28		! FGA
-	ldd		[%o1 + 0x010], %f4		! MS Group19
-	faligndata	%f14, %f0, %f30			! FGA
-	stda		%f16, [%o0] ASI_BLK_P		! MS Group20
-	ldd		[%o1 + 0x018], %f6		! AX
-	faligndata	%f0, %f2, %f16			! FGA Group11 (7-cycle stall)
-	ldd		[%o1 + 0x020], %f8		! MS
-	faligndata	%f2, %f4, %f18			! FGA Group12
-	ldd		[%o1 + 0x028], %f10		! MS
-	faligndata	%f4, %f6, %f20			! FGA Group13
-	ldd		[%o1 + 0x030], %f12		! MS
-	faligndata	%f6, %f8, %f22			! FGA Group14
-	ldd		[%o1 + 0x038], %f14		! MS
-	faligndata	%f8, %f10, %f24			! FGA Group15
-	cmp		%g1, 0				! A0
-	be,pt		%XCC, 1f			! BR
-	 add		%o0, 0x40, %o0			! A1
-	ldd		[%o1 + 0x040], %f0		! MS
-1:	faligndata	%f10, %f12, %f26		! FGA Group16
-	faligndata	%f12, %f14, %f28		! FGA Group17
-	faligndata	%f14, %f0, %f30			! FGA Group18
-	stda		%f16, [%o0] ASI_BLK_P		! MS
-	add		%o0, 0x40, %o0			! A0
-	add		%o1, 0x40, %o1			! A1
-	membar		#Sync				! MS Group26 (7-cycle stall)
+loopfini:
+	ldd		[%o1 + 0x008], %f2
+	faligndata	%f12, %f14, %f28
+	ldd		[%o1 + 0x010], %f4
+	faligndata	%f14, %f0, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x018], %f6
+	faligndata	%f0, %f2, %f16
+	ldd		[%o1 + 0x020], %f8
+	faligndata	%f2, %f4, %f18
+	ldd		[%o1 + 0x028], %f10
+	faligndata	%f4, %f6, %f20
+	ldd		[%o1 + 0x030], %f12
+	faligndata	%f6, %f8, %f22
+	ldd		[%o1 + 0x038], %f14
+	faligndata	%f8, %f10, %f24
+	cmp		%g1, 0
+	be,pt		%XCC, 1f
+	 add		%o0, 0x40, %o0
+	ldd		[%o1 + 0x040], %f0
+1:	faligndata	%f10, %f12, %f26
+	faligndata	%f12, %f14, %f28
+	faligndata	%f14, %f0, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	add		%o0, 0x40, %o0
+	add		%o1, 0x40, %o1
+	membar		#Sync
 	/* Now we copy the (len modulo 64) bytes at the end.
 	 * Note how we borrow the %f0 loaded above.
 	 *
 	 * Also notice how this code is careful not to perform a
-	 * load past the end of the src buffer just like similar
-	 * code found in 'toosmall' processing.
+	 * load past the end of the src buffer.
 	 */
-	and		%o2, 0x3f, %o2			! A0 Group
-	andcc		%o2, 0x38, %g2			! A0 Group
-	be,pn		%XCC, 107f			! BR
-	 subcc		%g2, 0x8, %g2			! A1
-	be,pn		%XCC, 107f			! BR Group
-	 cmp		%g1, 0				! A0
-	be,a,pt		%XCC, 1f			! BR Group
-	 ldd		[%o1 + 0x00], %f0		! MS
-1:	ldd		[%o1 + 0x08], %f2		! MS Group
-	add		%o1, 0x8, %o1			! A0
-	sub		%o2, 0x8, %o2			! A1
-	subcc		%g2, 0x8, %g2			! A0 Group
-	faligndata	%f0, %f2, %f8			! FGA Group
-	std		%f8, [%o0 + 0x00]		! MS (XXX does it stall here? XXX)
-	be,pn		%XCC, 107f			! BR
-	 add		%o0, 0x8, %o0			! A0
-	ldd		[%o1 + 0x08], %f0		! MS Group
-	add		%o1, 0x8, %o1			! A0
-	sub		%o2, 0x8, %o2			! A1
-	subcc		%g2, 0x8, %g2			! A0 Group
-	faligndata	%f2, %f0, %f8			! FGA
-	std		%f8, [%o0 + 0x00]		! MS (XXX does it stall here? XXX)
-	bne,pn		%XCC, 1b			! BR
-	 add		%o0, 0x8, %o0			! A0 Group
+loopend:
+	and		%o2, 0x3f, %o2
+	andcc		%o2, 0x38, %g2
+	be,pn		%XCC, endcruft
+	 subcc		%g2, 0x8, %g2
+	be,pn		%XCC, endcruft
+	 cmp		%g1, 0
+	be,a,pt		%XCC, 1f
+	 ldd		[%o1 + 0x00], %f0
+1:	ldd		[%o1 + 0x08], %f2
+	add		%o1, 0x8, %o1
+	sub		%o2, 0x8, %o2
+	subcc		%g2, 0x8, %g2
+	faligndata	%f0, %f2, %f8
+	std		%f8, [%o0 + 0x00]
+	be,pn		%XCC, endcruft
+	 add		%o0, 0x8, %o0
+	ldd		[%o1 + 0x08], %f0
+	add		%o1, 0x8, %o1
+	sub		%o2, 0x8, %o2
+	subcc		%g2, 0x8, %g2
+	faligndata	%f2, %f0, %f8
+	std		%f8, [%o0 + 0x00]
+	bne,pn		%XCC, 1b
+	 add		%o0, 0x8, %o0
 	/* If anything is left, we copy it one byte at a time.
 	 * Note that %g1 is (src & 0x3) saved above before the
 	 * alignaddr was performed.
 	 */
-107:
+endcruft:
 	cmp		%o2, 0
 	add		%o1, %g1, %o1
 	VISExitHalf
-	be,pn		%XCC, 102b
+	be,pn		%XCC, out
+	 sub		%o0, %o1, %o3
+	andcc		%g1, 0x7, %g0
+	bne,pn		%icc, small_copy_unaligned
+	 andcc		%o2, 0x8, %g0
+	be,pt		%icc, 1f
 	 nop
-	ba,a,pt		%XCC, 101b
-	/* If we get here, then 32 <= len < (6 * 64) */
-108:
-#ifdef SMALL_COPY_USES_FPU
-	/* Is 'dst' already aligned on an 8-byte boundary? */
-	be,pt		%XCC, 2f			! BR Group
-	/* Compute abs((dst & 7) - 8) into %g2.  This is the number
-	 * of bytes to copy to make 'dst' 8-byte aligned.  We pre-
-	 * subtract this from 'len'.
-	 */
-	 sub		%g2, 0x8, %g2			! A0
-	sub		%g0, %g2, %g2			! A0 Group (reg-dep)
-	sub		%o2, %g2, %o2			! A0 Group (reg-dep)
-	/* Copy %g2 bytes from src to dst, one byte at a time. */
-1:	ldub		[%o1 + 0x00], %o3		! MS (Group) (%o3 in 3 cycles)
-	add		%o1, 0x1, %o1			! A1
-	add		%o0, 0x1, %o0			! A0 Group
-	subcc		%g2, 0x1, %g2			! A1
-	bg,pt		%XCC, 1b			! BR Group
-	 stb		%o3, [%o0 + -1]			! MS Group
-2:	VISEntryHalf					! MS+MS
-	/* Compute (len - (len % 8)) into %g2.  This is guarenteed
-	 * to be nonzero.
-	 */
-	andn		%o2, 0x7, %g2			! A0 Group
-	/* You may read this and believe that it allows reading
-	 * one 8-byte longword past the end of src.  It actually
-	 * does not, as %g2 is subtracted as loads are done from
-	 * src, so we always stop before running off the end.
-	 * Also, we are guarenteed to have at least 0x10 bytes
-	 * to move here.
-	 */
-	sub		%g2, 0x8, %g2			! A0 Group (reg-dep)
-	alignaddr	%o1, %g0, %g1			! MS (Break-after)
-	ldd		[%g1 + 0x00], %f0		! MS Group (1-cycle stall)
-	add		%g1, 0x8, %g1			! A0
-1:	ldd		[%g1 + 0x00], %f2		! MS Group
-	add		%g1, 0x8, %g1			! A0
-	sub		%o2, 0x8, %o2			! A1
-	subcc		%g2, 0x8, %g2			! A0 Group
-	faligndata	%f0, %f2, %f8			! FGA Group (1-cycle stall)
-	std		%f8, [%o0 + 0x00]		! MS Group (2-cycle stall)
-	add		%o1, 0x8, %o1			! A0
-	be,pn		%XCC, 2f			! BR
-	 add		%o0, 0x8, %o0			! A1
-	ldd		[%g1 + 0x00], %f0		! MS Group
-	add		%g1, 0x8, %g1			! A0
-	sub		%o2, 0x8, %o2			! A1
-	subcc		%g2, 0x8, %g2			! A0 Group
-	faligndata	%f2, %f0, %f8			! FGA Group (1-cycle stall)
-	std		%f8, [%o0 + 0x00]		! MS Group (2-cycle stall)
-	add		%o1, 0x8, %o1			! A0
-	bne,pn		%XCC, 1b			! BR
-	 add		%o0, 0x8, %o0			! A1
-	/* Nothing left to copy? */
-2:	cmp		%o2, 0				! A0 Group
-	VISExitHalf					! A0+MS
-	be,pn		%XCC, 102b			! BR Group
-	 nop						! A0
-	ba,a,pt		%XCC, 101b			! BR Group
-#else /* !(SMALL_COPY_USES_FPU) */
-	xor		%o1, %o0, %g2
-	andcc		%g2, 0x7, %g0
-	bne,pn		%XCC, 101b
-	 andcc		%o1, 0x7, %g2
-	be,pt		%XCC, 2f
-	 sub		%g2, 0x8, %g2
-	sub		%g0, %g2, %g2
-	sub		%o2, %g2, %o2
-1:	ldub		[%o1 + 0x00], %o3
-	add		%o1, 0x1, %o1
-	add		%o0, 0x1, %o0
-	subcc		%g2, 0x1, %g2
-	bg,pt		%XCC, 1b
-	 stb		%o3, [%o0 + -1]
-2:	andn		%o2, 0x7, %g2
-	sub		%o2, %g2, %o2
-3:	ldx		[%o1 + 0x00], %o3
-	add		%o1, 0x8, %o1
-	add		%o0, 0x8, %o0
-	subcc		%g2, 0x8, %g2
-	bg,pt		%XCC, 3b
-	 stx		%o3, [%o0 + -8]
-	cmp		%o2, 0
-	bne,pn		%XCC, 101b
-	 nop
-	ba,a,pt		%XCC, 102b
-#endif /* !(SMALL_COPY_USES_FPU) */
+	ldx		[%o1], %o5
+	stx		%o5, [%o1 + %o3]
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%icc, 1f
+	 nop
+	lduw		[%o1], %o5
+	stw		%o5, [%o1 + %o3]
+	add		%o1, 0x4, %o1
+1:	andcc		%o2, 0x2, %g0
+	be,pt		%icc, 1f
+	 nop
+	lduh		[%o1], %o5
+	sth		%o5, [%o1 + %o3]
+	add		%o1, 0x2, %o1
+1:	andcc		%o2, 0x1, %g0
+	be,pt		%icc, out
+	 nop
+	ldub		[%o1], %o5
+	ba,pt		%xcc, out
+	 stb		%o5, [%o1 + %o3]
+medium_copy: /* 16 < len <= 64 */
+	bne,pn		%XCC, small_copy_unaligned
+	 sub		%o0, %o1, %o3
+medium_copy_aligned:
+	andn		%o2, 0x7, %o4
+	and		%o2, 0x7, %o2
+1:	subcc		%o4, 0x8, %o4
+	ldx		[%o1], %o5
+	stx		%o5, [%o1 + %o3]
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	lduw		[%o1], %o5
+	stw		%o5, [%o1 + %o3]
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, out
+	 nop
+	ba,pt		%xcc, small_copy_unaligned
+	 nop
+small_copy: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, small_copy_unaligned
+	 sub		%o0, %o1, %o3
+small_copy_aligned:
+	subcc		%o2, 4, %o2
+	lduw		[%o1], %g1
+	stw		%g1, [%o1 + %o3]
+	bgu,pt		%XCC, small_copy_aligned
+	 add		%o1, 4, %o1
+out:	retl
+	 mov		%g5, %o0
+	.align		32
+small_copy_unaligned:
+	subcc		%o2, 1, %o2
+	ldub		[%o1], %g1
+	stb		%g1, [%o1 + %o3]
+	bgu,pt		%XCC, small_copy_unaligned
+	 add		%o1, 1, %o1
+	retl
+	 mov		%g5, %o0
 END(memcpy)
 #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
@@ -511,11 +406,11 @@ END(memcpy)
 	.align		32
 ENTRY(memmove)
-	mov		%o0, %g3
+	mov		%o0, %g5
 #ifndef USE_BPR
 	srl		%o2, 0, %o2			/* IEU1 Group */
 #endif
-	brz,pn		%o2, 102b			/* CTI Group */
+	brz,pn		%o2, out			/* CTI Group */
 	 sub		%o0, %o1, %o4			/* IEU0 */
 	cmp		%o4, %o2			/* IEU1 Group */
 	bgeu,pt		%XCC, 218b			/* CTI */
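
A note on the new small/medium paths: "sub %o0, %o1, %o3" materializes dst - src once, after which every store is addressed as [%o1 + %o3], so incrementing %o1 alone advances both the load and the store stream. That shared-offset scheme is also what lets bcopy and memmove fall into memcpy's 218:/out entry points, as the hunk above shows. A rough C analogue of the trick (illustrative only; the function name is made up, src is left non-const for the write through the offset, and the pointer difference is taken loosely, as the assembly does):

	#include <stddef.h>

	/* Sketch of the shared-offset addressing in small_copy_unaligned:
	 * "off" plays the role of %o3 = dst - src, "src" the role of %o1. */
	static void byte_copy_fwd(unsigned char *dst, unsigned char *src, size_t len)
	{
		ptrdiff_t off = dst - src;	/* sub %o0, %o1, %o3 */
		while (len-- != 0) {
			src[off] = *src;	/* ldub [%o1], %g1; stb %g1, [%o1 + %o3] */
			src++;			/* add %o1, 1, %o1 */
		}
	}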