x86/float_dsp: unroll loop in vector_fmac_scalar

~6% faster SSE2 performance. AVX/FMA3 are unaffected.

Signed-off-by: James Almer <jamrial@gmail.com>
Reviewed-by: Christophe Gisquet <christophe.gisquet@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Author: James Almer, 2014-04-16 02:09:36 -03:00 (committed by Michael Niedermayer)
parent 27f184ef40
commit 11b36b1ee0
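
For context, vector_fmac_scalar multiplies a source vector by a scalar and accumulates the result into the destination, i.e. dst[i] += src[i] * mul. Below is a minimal C sketch of that reference behaviour; the helper name and the alignment/length notes are assumptions inferred from how the assembly loop steps through the buffers, not taken verbatim from FFmpeg:

/* Reference behaviour of the routine being optimized: dst[i] += src[i] * mul.
 * The asm loop walks the buffers 64 bytes (16 floats) per iteration, so
 * callers are expected to pass suitably aligned, padded buffers. */
static void vector_fmac_scalar_ref(float *dst, const float *src, float mul,
                                   int len)
{
    for (int i = 0; i < len; i++)
        dst[i] += src[i] * mul;
}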

@@ -61,9 +61,9 @@ VECTOR_FMUL
 %macro VECTOR_FMAC_SCALAR 0
 %if UNIX64
-cglobal vector_fmac_scalar, 3,3,3, dst, src, len
+cglobal vector_fmac_scalar, 3,3,5, dst, src, len
 %else
-cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
+cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
 %endif
 %if ARCH_X86_32
     VBROADCASTSS m0, mulm
@@ -78,23 +78,31 @@ cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
 %endif
     lea lenq, [lend*4-64]
 .loop:
-%assign a 0
-%rep 32/mmsize
 %if cpuflag(fma3)
-    mova    m1, [dstq+lenq+(a+0)*mmsize]
-    mova    m2, [dstq+lenq+(a+1)*mmsize]
-    fmaddps m1, m0, [srcq+lenq+(a+0)*mmsize], m1
-    fmaddps m2, m0, [srcq+lenq+(a+1)*mmsize], m2
-%else
-    mulps   m1, m0, [srcq+lenq+(a+0)*mmsize]
-    mulps   m2, m0, [srcq+lenq+(a+1)*mmsize]
-    addps   m1, m1, [dstq+lenq+(a+0)*mmsize]
-    addps   m2, m2, [dstq+lenq+(a+1)*mmsize]
-%endif
-    mova  [dstq+lenq+(a+0)*mmsize], m1
-    mova  [dstq+lenq+(a+1)*mmsize], m2
-%assign a a+2
-%endrep
+    mova    m1, [dstq+lenq]
+    mova    m2, [dstq+lenq+1*mmsize]
+    fmaddps m1, m0, [srcq+lenq], m1
+    fmaddps m2, m0, [srcq+lenq+1*mmsize], m2
+%else ; cpuflag
+    mulps   m1, m0, [srcq+lenq]
+    mulps   m2, m0, [srcq+lenq+1*mmsize]
+%if mmsize < 32
+    mulps   m3, m0, [srcq+lenq+2*mmsize]
+    mulps   m4, m0, [srcq+lenq+3*mmsize]
+%endif ; mmsize
+    addps   m1, m1, [dstq+lenq]
+    addps   m2, m2, [dstq+lenq+1*mmsize]
+%if mmsize < 32
+    addps   m3, m3, [dstq+lenq+2*mmsize]
+    addps   m4, m4, [dstq+lenq+3*mmsize]
+%endif ; mmsize
+%endif ; cpuflag
+    mova  [dstq+lenq], m1
+    mova  [dstq+lenq+1*mmsize], m2
+%if mmsize < 32
+    mova  [dstq+lenq+2*mmsize], m3
+    mova  [dstq+lenq+3*mmsize], m4
+%endif ; mmsize
     sub    lenq, 64
     jge .loop
 REP_RET
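
Why the xmm count in cglobal goes from 3 to 5: the loop still consumes 64 bytes (16 floats) per pass, but the SSE2 path (mmsize == 16) now keeps four independent 16-byte blocks in flight in m1..m4 alongside the broadcast scalar in m0, instead of re-running a two-register body via %rep. With AVX/FMA3 (mmsize == 32) two registers already cover the 64 bytes, so the %if mmsize < 32 guards compile away and the generated code is unchanged, matching the commit message. A rough C sketch of the unrolled SSE2 iteration shape follows; the name is illustrative, and the asm actually walks the buffers from the end downward:

/* Illustrative only: one unrolled SSE2-style pass, four independent 4-float
 * blocks per 64-byte step, mirroring registers m1..m4 in the asm above. */
static void fmac_scalar_unrolled_shape(float *dst, const float *src,
                                       float mul, int len)
{
    for (int i = 0; i < len; i += 16) {            /* sub lenq, 64 (bytes)  */
        for (int block = 0; block < 4; block++) {  /* m1, m2, m3, m4        */
            int base = i + 4 * block;
            for (int j = 0; j < 4; j++)            /* one mulps/addps pair  */
                dst[base + j] += src[base + j] * mul;
        }
    }
}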