Sandy Bridge Win64:
180 cycles in ff_synth_filter_inner_sse2
150 cycles in ff_synth_filter_inner_avx

Also switch to a three-operand format for some instructions to avoid
assembly errors with Yasm 1.1.0 or older.

Signed-off-by: James Almer <jamr...@gmail.com>
---
 libavcodec/x86/dcadsp.asm    | 76 +++++++++++++++++++++++++-------------------
 libavcodec/x86/dcadsp_init.c |  4 +++
 2 files changed, 48 insertions(+), 32 deletions(-)

diff --git a/libavcodec/x86/dcadsp.asm b/libavcodec/x86/dcadsp.asm
index 970ec3d..0d7c86e 100644
--- a/libavcodec/x86/dcadsp.asm
+++ b/libavcodec/x86/dcadsp.asm
@@ -200,18 +200,22 @@ DCA_LFE_FIR 0
 DCA_LFE_FIR 1
 
 %macro SETZERO 1
-%if cpuflag(sse2)
+%if cpuflag(sse2) && notcpuflag(avx)
     pxor          %1, %1
 %else
     xorps         %1, %1, %1
 %endif
 %endmacro
 
-%macro SHUF 2
-%if cpuflag(sse2)
-    pshufd        %1, %2, q0123
+%macro SHUF 3
+%if cpuflag(avx)
+    mova          %3, [%2 - 16]
+    vperm2f128    %1, %3, %3, 1
+    vshufps       %1, %1, %1, q0123
+%elif cpuflag(sse2)
+    pshufd        %1, [%2], q0123
 %else
-    mova          %1, %2
+    mova          %1, [%2]
     shufps        %1, %1, q0123
 %endif
 %endmacro
@@ -220,43 +224,43 @@ DCA_LFE_FIR 1
     ; reading backwards:  ptr1 = synth_buf + j + i; ptr2 = synth_buf + j - i
     ;~ a += window[i + j]      * (-synth_buf[15 - i + j])
     ;~ b += window[i + j + 16] * (synth_buf[i + j])
-    SHUF          m5, [ptr2 + j + (15 - 3) * 4]
+    SHUF          m5,  ptr2 + j + (15 - 3) * 4, m6
     mova          m6, [ptr1 + j]
 %if ARCH_X86_64
-    SHUF         m11, [ptr2 + j + (15 - 3) * 4 - mmsize]
+    SHUF         m11,  ptr2 + j + (15 - 3) * 4 - mmsize, m12
     mova         m12, [ptr1 + j + mmsize]
 %endif
-    mulps         m6, [win  + %1 + j + 16 * 4]
-    mulps         m5, [win  + %1 + j]
+    mulps         m6, m6,  [win + %1 + j + 16 * 4]
+    mulps         m5, m5,  [win + %1 + j]
 %if ARCH_X86_64
-    mulps        m12, [win  + %1 + j + mmsize + 16 * 4]
-    mulps        m11, [win  + %1 + j + mmsize]
+    mulps        m12, m12, [win + %1 + j + mmsize + 16 * 4]
+    mulps        m11, m11, [win + %1 + j + mmsize]
 %endif
-    addps         m2, m6
-    subps         m1, m5
+    addps         m2, m2, m6
+    subps         m1, m1, m5
 %if ARCH_X86_64
-    addps         m8, m12
-    subps         m7, m11
+    addps         m8, m8, m12
+    subps         m7, m7, m11
 %endif
     ;~ c += window[i + j + 32] * (synth_buf[16 + i + j])
     ;~ d += window[i + j + 48] * (synth_buf[31 - i + j])
-    SHUF          m6, [ptr2 + j + (31 - 3) * 4]
+    SHUF          m6,  ptr2 + j + (31 - 3) * 4, m5
     mova          m5, [ptr1 + j + 16 * 4]
 %if ARCH_X86_64
-    SHUF         m12, [ptr2 + j + (31 - 3) * 4 - mmsize]
+    SHUF         m12,  ptr2 + j + (31 - 3) * 4 - mmsize, m11
     mova         m11, [ptr1 + j + mmsize + 16 * 4]
 %endif
-    mulps         m5, [win  + %1 + j + 32 * 4]
-    mulps         m6, [win  + %1 + j + 48 * 4]
+    mulps         m5, m5,  [win + %1 + j + 32 * 4]
+    mulps         m6, m6,  [win + %1 + j + 48 * 4]
 %if ARCH_X86_64
-    mulps        m11, [win  + %1 + j + mmsize + 32 * 4]
-    mulps        m12, [win  + %1 + j + mmsize + 48 * 4]
+    mulps        m11, m11, [win + %1 + j + mmsize + 32 * 4]
+    mulps        m12, m12, [win + %1 + j + mmsize + 48 * 4]
 %endif
-    addps         m3, m5
-    addps         m4, m6
+    addps         m3, m3, m5
+    addps         m4, m4, m6
 %if ARCH_X86_64
-    addps         m9, m11
-    addps        m10, m12
+    addps         m9, m9, m11
+    addps        m10, m10, m12
 %endif
     sub            j, 64 * 4
 %endmacro
@@ -269,17 +273,21 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
                               synth_buf, synth_buf2, window, out, off, scale
 %define scale m0
 %if ARCH_X86_32 || WIN64
-%if cpuflag(sse2)
+%if cpuflag(sse2) && notcpuflag(avx)
     movd       scale, scalem
+    SPLATD        m0
 %else
-    movss      scale, scalem
+    VBROADCASTSS  m0, scalem
 %endif
 ; Make sure offset is in a register and not on the stack
 %define OFFQ  r4q
 %else
+    SPLATD      xmm0
+%if cpuflag(avx)
+    vinsertf128   m0, m0, xmm0, 1
+%endif
 %define OFFQ  offq
 %endif
-    SPLATD        m0
     ; prepare inner counter limit 1
     mov          r5q, 480
     sub          r5q, offmp
@@ -346,11 +354,11 @@ cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
 %endif
     ;~ out[i]      = a * scale;
     ;~ out[i + 16] = b * scale;
-    mulps         m1, scale
-    mulps         m2, scale
+    mulps         m1, m1, scale
+    mulps         m2, m2, scale
 %if ARCH_X86_64
-    mulps         m7, scale
-    mulps         m8, scale
+    mulps         m7, m7, scale
+    mulps         m8, m8, scale
 %endif
     ;~ synth_buf2[i]      = c;
     ;~ synth_buf2[i + 16] = d;
@@ -379,3 +387,7 @@ SYNTH_FILTER
 %endif
 INIT_XMM sse2
 SYNTH_FILTER
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+SYNTH_FILTER
+%endif
diff --git a/libavcodec/x86/dcadsp_init.c b/libavcodec/x86/dcadsp_init.c
index f8dd9b1..ab20635 100644
--- a/libavcodec/x86/dcadsp_init.c
+++ b/libavcodec/x86/dcadsp_init.c
@@ -79,6 +79,7 @@ static void synth_filter_##opt(FFTContext *imdct,             \
 SYNTH_FILTER_FUNC(sse)
 #endif
 SYNTH_FILTER_FUNC(sse2)
+SYNTH_FILTER_FUNC(avx)
 
 av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
 {
@@ -92,4 +93,7 @@ av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
     if (EXTERNAL_SSE2(cpu_flags)) {
         s->synth_filter_float = synth_filter_sse2;
     }
+    if (EXTERNAL_AVX(cpu_flags)) {
+        s->synth_filter_float = synth_filter_avx;
+    }
 }
-- 
1.8.3.2

_______________________________________________
libav-devel mailing list
libav-devel@libav.org
https://lists.libav.org/mailman/listinfo/libav-devel

Reply via email to