mirror of git://sourceware.org/git/glibc.git

x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h

1. Remove unnecessary spills.
2. Fix some small nit missed optimizations.

All math and mathvec tests pass on x86.
parent d371be4b11
commit 72f6a5a0ed
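For readers coming to the diff cold: the wrapper macros below implement vector math entry points by calling a scalar (or narrower-vector) routine once per lane and repacking the results. A rough C sketch of the 2-lane double case follows; the names (wrapper_sse2, scalar_fn_t) are hypothetical and not part of glibc.

#include <emmintrin.h>

typedef double (*scalar_fn_t) (double);

static __m128d
wrapper_sse2 (scalar_fn_t callee, __m128d x)
{
  double lo = callee (_mm_cvtsd_f64 (x));                      /* lane 0 */
  double hi = callee (_mm_cvtsd_f64 (_mm_unpackhi_pd (x, x))); /* lane 1 */
  return _mm_set_pd (hi, lo);                                  /* pack {hi, lo} */
}

The patch keeps this structure; it mainly shrinks the stack frames and drops stores and reloads that the repacking step never needed.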
svml_d_wrapper_impl.h
@@ -18,15 +18,34 @@
 
 /* SSE2 ISA version as wrapper to scalar.  */
 .macro WRAPPER_IMPL_SSE2 callee
+	subq $24, %rsp
+	cfi_adjust_cfa_offset (24)
+	movaps %xmm0, (%rsp)
+	call JUMPTARGET(\callee)
+	movsd %xmm0, (%rsp)
+	movsd 8(%rsp), %xmm0
+	call JUMPTARGET(\callee)
+	movsd (%rsp), %xmm1
+	unpcklpd %xmm0, %xmm1
+	movaps %xmm1, %xmm0
+	addq $24, %rsp
+	cfi_adjust_cfa_offset (-24)
+	ret
+.endm
+
+
+/* 2 argument SSE2 ISA version as wrapper to scalar.  */
+.macro WRAPPER_IMPL_SSE2_ff callee
 	subq $40, %rsp
 	cfi_adjust_cfa_offset (40)
 	movaps %xmm0, (%rsp)
+	movaps %xmm1, 16(%rsp)
 	call JUMPTARGET(\callee)
-	movsd %xmm0, 16(%rsp)
+	movsd %xmm0, (%rsp)
 	movsd 8(%rsp), %xmm0
+	movsd 24(%rsp), %xmm1
 	call JUMPTARGET(\callee)
-	movsd 16(%rsp), %xmm1
-	movsd %xmm0, 24(%rsp)
+	movsd (%rsp), %xmm1
 	unpcklpd %xmm0, %xmm1
 	movaps %xmm1, %xmm0
 	addq $40, %rsp
@@ -34,26 +53,6 @@
 	ret
 .endm
 
-/* 2 argument SSE2 ISA version as wrapper to scalar.  */
-.macro WRAPPER_IMPL_SSE2_ff callee
-	subq $56, %rsp
-	cfi_adjust_cfa_offset (56)
-	movaps %xmm0, (%rsp)
-	movaps %xmm1, 16(%rsp)
-	call JUMPTARGET(\callee)
-	movsd %xmm0, 32(%rsp)
-	movsd 8(%rsp), %xmm0
-	movsd 24(%rsp), %xmm1
-	call JUMPTARGET(\callee)
-	movsd 32(%rsp), %xmm1
-	movsd %xmm0, 40(%rsp)
-	unpcklpd %xmm0, %xmm1
-	movaps %xmm1, %xmm0
-	addq $56, %rsp
-	cfi_adjust_cfa_offset (-56)
-	ret
-.endm
-
 /* 3 argument SSE2 ISA version as wrapper to scalar.  */
 .macro WRAPPER_IMPL_SSE2_fFF callee
 	pushq %rbp
@@ -62,30 +61,18 @@
 	pushq %rbx
 	cfi_adjust_cfa_offset (8)
 	cfi_rel_offset (%rbx, 0)
+	subq $24, %rsp
+	cfi_adjust_cfa_offset (24)
+	movaps %xmm0, (%rsp)
 	movq %rdi, %rbp
 	movq %rsi, %rbx
-	subq $40, %rsp
-	cfi_adjust_cfa_offset (40)
-	leaq 16(%rsp), %rsi
-	leaq 24(%rsp), %rdi
-	movaps %xmm0, (%rsp)
 	call JUMPTARGET(\callee)
-	leaq 16(%rsp), %rsi
-	leaq 24(%rsp), %rdi
-	movsd 24(%rsp), %xmm0
-	movapd (%rsp), %xmm1
-	movsd %xmm0, 0(%rbp)
-	unpckhpd %xmm1, %xmm1
-	movsd 16(%rsp), %xmm0
-	movsd %xmm0, (%rbx)
-	movapd %xmm1, %xmm0
+	movsd 8(%rsp), %xmm0
+	leaq 8(%rbp), %rdi
+	leaq 8(%rbx), %rsi
 	call JUMPTARGET(\callee)
-	movsd 24(%rsp), %xmm0
-	movsd %xmm0, 8(%rbp)
-	movsd 16(%rsp), %xmm0
-	movsd %xmm0, 8(%rbx)
-	addq $40, %rsp
-	cfi_adjust_cfa_offset (-40)
+	addq $24, %rsp
+	cfi_adjust_cfa_offset (-24)
 	popq %rbx
 	cfi_adjust_cfa_offset (-8)
 	cfi_restore (%rbx)
@@ -104,15 +91,17 @@
 	cfi_def_cfa_register (%rbp)
 	andq $-32, %rsp
 	subq $32, %rsp
-	vextractf128 $1, %ymm0, (%rsp)
+	vmovaps %ymm0, (%rsp)
 	vzeroupper
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovapd %xmm0, 16(%rsp)
-	vmovaps (%rsp), %xmm0
+	vmovaps %xmm0, (%rsp)
+	vmovaps 16(%rsp), %xmm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovapd %xmm0, %xmm1
-	vmovapd 16(%rsp), %xmm0
-	vinsertf128 $1, %xmm1, %ymm0, %ymm0
+	/* combine xmm0 (return of second call) with result of first
+	   call (saved on stack). Might be worth exploring logic that
+	   uses `vpblend` and reads in ymm1 using -16(rsp). */
+	vmovaps (%rsp), %xmm1
+	vinsertf128 $1, %xmm0, %ymm1, %ymm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -130,17 +119,19 @@
 	cfi_def_cfa_register (%rbp)
 	andq $-32, %rsp
 	subq $64, %rsp
-	vextractf128 $1, %ymm0, 16(%rsp)
-	vextractf128 $1, %ymm1, (%rsp)
+	vmovaps %ymm0, (%rsp)
+	vmovaps %ymm1, 32(%rsp)
 	vzeroupper
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, 32(%rsp)
+	vmovaps 48(%rsp), %xmm1
+	vmovaps %xmm0, (%rsp)
 	vmovaps 16(%rsp), %xmm0
-	vmovaps (%rsp), %xmm1
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, %xmm1
-	vmovaps 32(%rsp), %xmm0
-	vinsertf128 $1, %xmm1, %ymm0, %ymm0
+	/* combine xmm0 (return of second call) with result of first
+	   call (saved on stack). Might be worth exploring logic that
+	   uses `vpblend` and reads in ymm1 using -16(rsp). */
+	vmovaps (%rsp), %xmm1
+	vinsertf128 $1, %xmm0, %ymm1, %ymm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -155,35 +146,21 @@
 	cfi_adjust_cfa_offset (8)
 	cfi_rel_offset (%rbp, 0)
 	movq %rsp, %rbp
-	cfi_def_cfa_register (%rbp)
 	andq $-32, %rsp
-	pushq %r13
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%r13, 0)
+	subq $32, %rsp
+	vmovaps %ymm0, (%rsp)
+	pushq %rbx
 	pushq %r14
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%r14, 0)
-	subq $48, %rsp
+	movq %rdi, %rbx
 	movq %rsi, %r14
-	movq %rdi, %r13
-	vextractf128 $1, %ymm0, 32(%rsp)
 	vzeroupper
 	call HIDDEN_JUMPTARGET(\callee)
 	vmovaps 32(%rsp), %xmm0
-	lea (%rsp), %rdi
-	lea 16(%rsp), %rsi
+	leaq 16(%rbx), %rdi
+	leaq 16(%r14), %rsi
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovapd (%rsp), %xmm0
-	vmovapd 16(%rsp), %xmm1
-	vmovapd %xmm0, 16(%r13)
-	vmovapd %xmm1, 16(%r14)
-	addq $48, %rsp
 	popq %r14
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%r14)
-	popq %r13
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%r13)
+	popq %rbx
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -200,15 +177,16 @@
 	movq %rsp, %rbp
 	cfi_def_cfa_register (%rbp)
 	andq $-64, %rsp
-	subq $128, %rsp
+	subq $64, %rsp
 	vmovups %zmm0, (%rsp)
-	vmovupd (%rsp), %ymm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd %ymm0, 64(%rsp)
+	vmovupd %ymm0, (%rsp)
 	vmovupd 32(%rsp), %ymm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd %ymm0, 96(%rsp)
-	vmovups 64(%rsp), %zmm0
+	/* combine ymm0 (return of second call) with result of first
+	   call (saved on stack). */
+	vmovaps (%rsp), %ymm1
+	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -225,18 +203,19 @@
 	movq %rsp, %rbp
 	cfi_def_cfa_register (%rbp)
 	andq $-64, %rsp
-	subq $192, %rsp
+	addq $-128, %rsp
 	vmovups %zmm0, (%rsp)
 	vmovups %zmm1, 64(%rsp)
-	vmovupd (%rsp), %ymm0
-	vmovupd 64(%rsp), %ymm1
+	/* ymm0 and ymm1 are already set. */
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd %ymm0, 128(%rsp)
-	vmovupd 32(%rsp), %ymm0
-	vmovupd 96(%rsp), %ymm1
+	vmovups 96(%rsp), %ymm1
+	vmovaps %ymm0, (%rsp)
+	vmovups 32(%rsp), %ymm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd %ymm0, 160(%rsp)
-	vmovups 128(%rsp), %zmm0
+	/* combine ymm0 (return of second call) with result of first
+	   call (saved on stack). */
+	vmovaps (%rsp), %ymm1
+	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -253,34 +232,20 @@
 	movq %rsp, %rbp
 	cfi_def_cfa_register (%rbp)
 	andq $-64, %rsp
-	pushq %r12
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%r12, 0)
-	pushq %r13
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%r13, 0)
-	subq $176, %rsp
-	movq %rsi, %r13
-	vmovups %zmm0, (%rsp)
-	movq %rdi, %r12
-	vmovupd (%rsp), %ymm0
+	subq $64, %rsp
+	vmovaps %zmm0, (%rsp)
+	pushq %rbx
+	pushq %r14
+	movq %rdi, %rbx
+	movq %rsi, %r14
+	/* ymm0 is already set. */
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd 32(%rsp), %ymm0
-	lea 64(%rsp), %rdi
-	lea 96(%rsp), %rsi
+	vmovaps 48(%rsp), %ymm0
+	leaq 32(%rbx), %rdi
+	leaq 32(%r14), %rsi
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd 64(%rsp), %ymm0
-	vmovupd 96(%rsp), %ymm1
-	vmovupd %ymm0, 32(%r12)
-	vmovupd %ymm1, 32(%r13)
-	vzeroupper
-	addq $176, %rsp
-	popq %r13
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%r13)
-	popq %r12
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%r12)
+	popq %r14
+	popq %rbx
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
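The AVX/AVX512 hunks above (and the matching float hunks below) repeatedly replace vextractf128 spills with a single full-width store up front and a vinsertf128/vinserti64x4 merge at the end. As an illustration only, not code from the patch, the split-call-recombine pattern looks roughly like the intrinsics analogue below, assuming a hypothetical 128-bit kernel passed as callee.

#include <immintrin.h>

typedef __m128d (*kernel128_t) (__m128d);

static __m256d
wrapper_avx (kernel128_t callee, __m256d x)
{
  __m128d lo = callee (_mm256_castpd256_pd128 (x));   /* low 128 bits */
  __m128d hi = callee (_mm256_extractf128_pd (x, 1)); /* high 128 bits */
  /* Equivalent of vinsertf128 $1: place 'hi' above 'lo'.  */
  return _mm256_insertf128_pd (_mm256_castpd128_pd256 (lo), hi, 1);
}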
svml_s_wrapper_impl.h
@@ -18,61 +18,66 @@
 
 /* SSE2 ISA version as wrapper to scalar.  */
 .macro WRAPPER_IMPL_SSE2 callee
-	subq $40, %rsp
-	cfi_adjust_cfa_offset (40)
+	push %rbx
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbx, 0)
+	subq $16, %rsp
+	cfi_adjust_cfa_offset (16)
 	movaps %xmm0, (%rsp)
 	call JUMPTARGET(\callee)
-	movss %xmm0, 16(%rsp)
+	movss %xmm0, (%rsp)
 	movss 4(%rsp), %xmm0
 	call JUMPTARGET(\callee)
-	movss %xmm0, 20(%rsp)
+	movss %xmm0, 4(%rsp)
 	movss 8(%rsp), %xmm0
 	call JUMPTARGET(\callee)
-	movss %xmm0, 24(%rsp)
+	movd %xmm0, %ebx
 	movss 12(%rsp), %xmm0
 	call JUMPTARGET(\callee)
-	movss 16(%rsp), %xmm3
-	movss 20(%rsp), %xmm2
-	movss 24(%rsp), %xmm1
-	movss %xmm0, 28(%rsp)
-	unpcklps %xmm1, %xmm3
-	unpcklps %xmm0, %xmm2
-	unpcklps %xmm2, %xmm3
-	movaps %xmm3, %xmm0
-	addq $40, %rsp
-	cfi_adjust_cfa_offset (-40)
+	movd %ebx, %xmm1
+	unpcklps %xmm0, %xmm1
+	movsd (%rsp), %xmm0
+	unpcklpd %xmm1, %xmm0
+	addq $16, %rsp
+	cfi_adjust_cfa_offset (-16)
+	popq %rbx
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbx)
 	ret
 .endm
 
 /* 2 argument SSE2 ISA version as wrapper to scalar.  */
 .macro WRAPPER_IMPL_SSE2_ff callee
-	subq $56, %rsp
-	cfi_adjust_cfa_offset (56)
+	push %rbx
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbx, 0)
+	subq $32, %rsp
+	cfi_adjust_cfa_offset (40)
 	movaps %xmm0, (%rsp)
 	movaps %xmm1, 16(%rsp)
 	call JUMPTARGET(\callee)
-	movss %xmm0, 32(%rsp)
-	movss 4(%rsp), %xmm0
 	movss 20(%rsp), %xmm1
+	movss %xmm0, 0(%rsp)
+	movss 4(%rsp), %xmm0
 	call JUMPTARGET(\callee)
-	movss %xmm0, 36(%rsp)
-	movss 8(%rsp), %xmm0
 	movss 24(%rsp), %xmm1
+	movss %xmm0, 4(%rsp)
+	movss 8(%rsp), %xmm0
 	call JUMPTARGET(\callee)
-	movss %xmm0, 40(%rsp)
-	movss 12(%rsp), %xmm0
 	movss 28(%rsp), %xmm1
+	movd %xmm0, %ebx
+	movss 12(%rsp), %xmm0
 	call JUMPTARGET(\callee)
-	movss 32(%rsp), %xmm3
-	movss 36(%rsp), %xmm2
-	movss 40(%rsp), %xmm1
-	movss %xmm0, 44(%rsp)
-	unpcklps %xmm1, %xmm3
-	unpcklps %xmm0, %xmm2
-	unpcklps %xmm2, %xmm3
-	movaps %xmm3, %xmm0
-	addq $56, %rsp
-	cfi_adjust_cfa_offset (-56)
+	/* merge 4x results into xmm0. */
+	movd %ebx, %xmm1
+	unpcklps %xmm0, %xmm1
+	movsd (%rsp), %xmm0
+	unpcklpd %xmm1, %xmm0
+	addq $32, %rsp
+	cfi_adjust_cfa_offset (-32)
+	popq %rbx
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbx)
 	ret
 .endm
 
@@ -86,48 +91,24 @@
 	cfi_rel_offset (%rbx, 0)
 	movq %rdi, %rbp
 	movq %rsi, %rbx
-	subq $40, %rsp
-	cfi_adjust_cfa_offset (40)
-	leaq 24(%rsp), %rsi
-	leaq 28(%rsp), %rdi
+	subq $24, %rsp
+	cfi_adjust_cfa_offset (24)
 	movaps %xmm0, (%rsp)
 	call JUMPTARGET(\callee)
-	leaq 24(%rsp), %rsi
-	leaq 28(%rsp), %rdi
-	movss 28(%rsp), %xmm0
-	movss %xmm0, 0(%rbp)
-	movaps (%rsp), %xmm1
-	movss 24(%rsp), %xmm0
-	movss %xmm0, (%rbx)
-	movaps %xmm1, %xmm0
-	shufps $85, %xmm1, %xmm0
+	movss 4(%rsp), %xmm0
+	leaq 4(%rbp), %rdi
+	leaq 4(%rbx), %rsi
 	call JUMPTARGET(\callee)
-	movss 28(%rsp), %xmm0
-	leaq 24(%rsp), %rsi
-	movss %xmm0, 4(%rbp)
-	leaq 28(%rsp), %rdi
-	movaps (%rsp), %xmm1
-	movss 24(%rsp), %xmm0
-	movss %xmm0, 4(%rbx)
-	movaps %xmm1, %xmm0
-	unpckhps %xmm1, %xmm0
+	movss 8(%rsp), %xmm0
+	leaq 8(%rbp), %rdi
+	leaq 8(%rbx), %rsi
 	call JUMPTARGET(\callee)
-	movaps (%rsp), %xmm1
-	leaq 24(%rsp), %rsi
-	leaq 28(%rsp), %rdi
-	movss 28(%rsp), %xmm0
-	shufps $255, %xmm1, %xmm1
-	movss %xmm0, 8(%rbp)
-	movss 24(%rsp), %xmm0
-	movss %xmm0, 8(%rbx)
-	movaps %xmm1, %xmm0
+	movss 12(%rsp), %xmm0
+	leaq 12(%rbp), %rdi
+	leaq 12(%rbx), %rsi
 	call JUMPTARGET(\callee)
-	movss 28(%rsp), %xmm0
-	movss %xmm0, 12(%rbp)
-	movss 24(%rsp), %xmm0
-	movss %xmm0, 12(%rbx)
-	addq $40, %rsp
-	cfi_adjust_cfa_offset (-40)
+	addq $24, %rsp
+	cfi_adjust_cfa_offset (-24)
 	popq %rbx
 	cfi_adjust_cfa_offset (-8)
 	cfi_restore (%rbx)
@@ -146,15 +127,17 @@
 	cfi_def_cfa_register (%rbp)
 	andq $-32, %rsp
 	subq $32, %rsp
-	vextractf128 $1, %ymm0, (%rsp)
+	vmovaps %ymm0, (%rsp)
 	vzeroupper
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, 16(%rsp)
-	vmovaps (%rsp), %xmm0
-	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, %xmm1
+	vmovaps %xmm0, (%rsp)
 	vmovaps 16(%rsp), %xmm0
-	vinsertf128 $1, %xmm1, %ymm0, %ymm0
+	call HIDDEN_JUMPTARGET(\callee)
+	/* combine xmm0 (return of second call) with result of first
+	   call (saved on stack). Might be worth exploring logic that
+	   uses `vpblend` and reads in ymm1 using -16(rsp). */
+	vmovaps (%rsp), %xmm1
+	vinsertf128 $1, %xmm0, %ymm1, %ymm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -172,17 +155,19 @@
 	cfi_def_cfa_register (%rbp)
 	andq $-32, %rsp
 	subq $64, %rsp
-	vextractf128 $1, %ymm0, 16(%rsp)
-	vextractf128 $1, %ymm1, (%rsp)
+	vmovaps %ymm0, (%rsp)
+	vmovaps %ymm1, 32(%rsp)
 	vzeroupper
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, 32(%rsp)
+	vmovaps 48(%rsp), %xmm1
+	vmovaps %xmm0, (%rsp)
 	vmovaps 16(%rsp), %xmm0
-	vmovaps (%rsp), %xmm1
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, %xmm1
-	vmovaps 32(%rsp), %xmm0
-	vinsertf128 $1, %xmm1, %ymm0, %ymm0
+	/* combine xmm0 (return of second call) with result of first
+	   call (saved on stack). Might be worth exploring logic that
+	   uses `vpblend` and reads in ymm1 using -16(rsp). */
+	vmovaps (%rsp), %xmm1
+	vinsertf128 $1, %xmm0, %ymm1, %ymm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -197,38 +182,21 @@
 	cfi_adjust_cfa_offset (8)
 	cfi_rel_offset (%rbp, 0)
 	movq %rsp, %rbp
-	cfi_def_cfa_register (%rbp)
 	andq $-32, %rsp
-	pushq %r13
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%r13, 0)
-	pushq %r14
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%r14, 0)
-	subq $48, %rsp
-	movq %rsi, %r14
+	subq $32, %rsp
 	vmovaps %ymm0, (%rsp)
-	movq %rdi, %r13
-	vmovaps 16(%rsp), %xmm1
-	vmovaps %xmm1, 32(%rsp)
+	pushq %rbx
+	pushq %r14
+	movq %rdi, %rbx
+	movq %rsi, %r14
 	vzeroupper
-	vmovaps (%rsp), %xmm0
 	call HIDDEN_JUMPTARGET(\callee)
 	vmovaps 32(%rsp), %xmm0
-	lea (%rsp), %rdi
-	lea 16(%rsp), %rsi
+	leaq 16(%rbx), %rdi
+	leaq 16(%r14), %rsi
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps (%rsp), %xmm0
-	vmovaps 16(%rsp), %xmm1
-	vmovaps %xmm0, 16(%r13)
-	vmovaps %xmm1, 16(%r14)
-	addq $48, %rsp
 	popq %r14
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%r14)
-	popq %r13
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%r13)
+	popq %rbx
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -245,15 +213,16 @@
 	movq %rsp, %rbp
 	cfi_def_cfa_register (%rbp)
 	andq $-64, %rsp
-	subq $128, %rsp
+	subq $64, %rsp
 	vmovups %zmm0, (%rsp)
-	vmovupd (%rsp), %ymm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd %ymm0, 64(%rsp)
+	vmovupd %ymm0, (%rsp)
 	vmovupd 32(%rsp), %ymm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovupd %ymm0, 96(%rsp)
-	vmovups 64(%rsp), %zmm0
+	/* combine ymm0 (return of second call) with result of first
+	   call (saved on stack). */
+	vmovaps (%rsp), %ymm1
+	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -270,18 +239,19 @@
 	movq %rsp, %rbp
 	cfi_def_cfa_register (%rbp)
 	andq $-64, %rsp
-	subq $192, %rsp
+	addq $-128, %rsp
 	vmovups %zmm0, (%rsp)
 	vmovups %zmm1, 64(%rsp)
-	vmovups (%rsp), %ymm0
-	vmovups 64(%rsp), %ymm1
+	/* ymm0 and ymm1 are already set. */
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovups %ymm0, 128(%rsp)
-	vmovups 32(%rsp), %ymm0
 	vmovups 96(%rsp), %ymm1
+	vmovaps %ymm0, (%rsp)
+	vmovups 32(%rsp), %ymm0
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovups %ymm0, 160(%rsp)
-	vmovups 128(%rsp), %zmm0
+	/* combine ymm0 (return of second call) with result of first
+	   call (saved on stack). */
+	vmovaps (%rsp), %ymm1
+	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
@@ -298,25 +268,20 @@
 	movq %rsp, %rbp
 	cfi_def_cfa_register (%rbp)
 	andq $-64, %rsp
-	pushq %r12
-	pushq %r13
-	subq $176, %rsp
-	movq %rsi, %r13
+	subq $64, %rsp
 	vmovaps %zmm0, (%rsp)
-	movq %rdi, %r12
-	vmovaps (%rsp), %ymm0
+	pushq %rbx
+	pushq %r14
+	movq %rdi, %rbx
+	movq %rsi, %r14
+	/* ymm0 is already set. */
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps 32(%rsp), %ymm0
-	lea 64(%rsp), %rdi
-	lea 96(%rsp), %rsi
+	vmovaps 48(%rsp), %ymm0
+	leaq 32(%rbx), %rdi
+	leaq 32(%r14), %rsi
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps 64(%rsp), %ymm0
-	vmovaps 96(%rsp), %ymm1
-	vmovaps %ymm0, 32(%r12)
-	vmovaps %ymm1, 32(%r13)
-	addq $176, %rsp
-	popq %r13
-	popq %r12
+	popq %r14
+	popq %rbx
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp
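The *_fFF wrappers handle sincos-style callees that return two results through pointers. After this patch the wrappers pass the caller's output slots (8(%rbp), 8(%rbx), and so on) straight to the scalar callee instead of staging results in the wrapper's own frame. A minimal C sketch of that calling pattern, with hypothetical names and not taken from glibc:

typedef void (*scalar_fFF_t) (double x, double *r1, double *r2);

static void
wrapper_sse2_fFF (scalar_fFF_t callee, const double x[2],
                  double r1[2], double r2[2])
{
  /* Pass the final destinations directly to the callee, as the new
     assembly does, rather than copying through a temporary frame.  */
  for (int i = 0; i < 2; i++)
    callee (x[i], &r1[i], &r2[i]);
}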