On Fri, 15 Apr 2022, Swinney, Jonathan wrote:

This commit adds new code paths for vscale when filterSize is 2, 4, or 8. By
using specialized code unrolled to match the filterSize, we can improve
performance.

Timings on a c6g instance, in seconds:

| filterSize  | 2     | 4     | 8     |
| ----------- | ----- | ----- | ----- |
| original    | 0.581 | 0.974 | 1.744 |
| optimized   | 0.399 | 0.569 | 1.052 |
| improvement | 31.1% | 41.6% | 39.7% |

Signed-off-by: Jonathan Swinney <jswin...@amazon.com>
---
libswscale/aarch64/output.S  | 147 +++++++++++++++++++++++++++++++++--
libswscale/aarch64/swscale.c |  12 +++
2 files changed, 153 insertions(+), 6 deletions(-)

diff --git a/libswscale/aarch64/output.S b/libswscale/aarch64/output.S
index af71de6050..9c99c3bea9 100644
--- a/libswscale/aarch64/output.S
+++ b/libswscale/aarch64/output.S
@@ -21,12 +21,27 @@
#include "libavutil/aarch64/asm.S"

function ff_yuv2planeX_8_neon, export=1
+// x0 - const int16_t *filter,
+// x1 - int filterSize,
+// x2 - const int16_t **src,
+// x3 - uint8_t *dest,
+// x4 - int dstW,
+// x5 - const uint8_t *dither,
+// x6 - int offset
+
        ld1                 {v0.8B}, [x5]                   // load 8x8-bit dither
        cbz                 w6, 1f                          // check if offsetting present
        ext                 v0.8B, v0.8B, v0.8B, #3         // honor offsetting which can be 0 or 3 only
1:      uxtl                v0.8H, v0.8B                    // extend dither to 16-bit
        ushll               v1.4S, v0.4H, #12               // extend dither to 32-bit with left shift by 12 (part 1)
        ushll2              v2.4S, v0.8H, #12               // extend dither to 32-bit with left shift by 12 (part 2)
+        cmp                 w1, #8                          // if filterSize == 8, branch to specialized version
+        b.eq                5f
+        cmp                 w1, #4                          // if filterSize == 4, branch to specialized version
+        b.eq                7f
+        cmp                 w1, #2                          // if filterSize == 2, branch to specialized version
+        b.eq                9f
+
        mov                 x7, #0                          // i = 0
2:      mov                 v3.16B, v1.16B                  // initialize accumulator part 1 with dithering value
        mov                 v4.16B, v2.16B                  // initialize accumulator part 2 with dithering value
@@ -34,16 +49,15 @@ function ff_yuv2planeX_8_neon, export=1
        mov                 x9, x2                          // srcp    = src
        mov                 x10, x0                         // filterp = filter
3:      ldp                 x11, x12, [x9], #16             // get 2 pointers: src[j] and src[j+1]
+        ld2r                {v16.8H, v17.8H}, [x10], #4     // read 2x16-bit coeff X and Y at filter[j] and filter[j+1]
        add                 x11, x11, x7, lsl #1            // &src[j  ][i]
        add                 x12, x12, x7, lsl #1            // &src[j+1][i]
        ld1                 {v5.8H}, [x11]                  // read 8x16-bit @ src[j  ][i + {0..7}]: A,B,C,D,E,F,G,H
        ld1                 {v6.8H}, [x12]                  // read 8x16-bit @ src[j+1][i + {0..7}]: I,J,K,L,M,N,O,P
-        ld1r                {v7.8H}, [x10], #2              // read 1x16-bit coeff X at filter[j  ] and duplicate across lanes
-        ld1r                {v16.8H}, [x10], #2             // read 1x16-bit coeff Y at filter[j+1] and duplicate across lanes
-        smlal               v3.4S, v5.4H, v7.4H             // val0 += {A,B,C,D} * X
-        smlal2              v4.4S, v5.8H, v7.8H             // val1 += {E,F,G,H} * X
-        smlal               v3.4S, v6.4H, v16.4H            // val0 += {I,J,K,L} * Y
-        smlal2              v4.4S, v6.8H, v16.8H            // val1 += {M,N,O,P} * Y
+        smlal               v3.4S, v5.4H, v16.4H            // val0 += {A,B,C,D} * X
+        smlal2              v4.4S, v5.8H, v16.8H            // val1 += {E,F,G,H} * X
+        smlal               v3.4S, v6.4H, v17.4H            // val0 += {I,J,K,L} * Y
+        smlal2              v4.4S, v6.8H, v17.8H            // val1 += {M,N,O,P} * Y
        subs                w8, w8, #2                      // tmpfilterSize -= 2
        b.gt                3b                              // loop until filterSize consumed

@@ -55,4 +69,125 @@ function ff_yuv2planeX_8_neon, export=1
        add                 x7, x7, #8                      // i += 8
        b.gt                2b                              // loop until width consumed
        ret
+
+5:      // fs=8
+        ldp                 x5, x6, [x2]                    // load 2 pointers: src[j  ] and src[j+1]
+        ldp                 x7, x9, [x2, #16]               // load 2 pointers: src[j+2] and src[j+3]
+        ldp                 x10, x11, [x2, #32]             // load 2 pointers: src[j+4] and src[j+5]
+        ldp                 x12, x13, [x2, #48]             // load 2 pointers: src[j+6] and src[j+7]
+
+        // load 8x16-bit values for filter[j], where j=0..7, replicated across lanes
+        ld4r                {v16.8H, v17.8H, v18.8H, v19.8H}, [x0], #8
+        ld4r                {v20.8H, v21.8H, v22.8H, v23.8H}, [x0]

Instead of ld4r like this, would it make things better (or keep them similar, or worse?) if this were just a regular load, and you changed the smlal instructions to take the third operand in the form of e.g. v0.h[0]? It'd save one instruction when loading the filter coefficients here, but I'm not sure if there's any other difference...
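
Roughly something like this (untested, just to sketch the idea; note that the 16-bit by-element form of smlal requires the coefficient register to be in v0-v15, so the filter would have to go into a low register; v6 here is just an arbitrary free one):

        ld1                 {v6.8h}, [x0]                   // load 8x16-bit coeffs filter[0..7] into one vector
        ...
        smlal               v3.4s, v24.4h, v6.h[0]          // val0 += src[0][i + {0..3}] * filter[0]
        smlal2              v4.4s, v24.8h, v6.h[0]          // val1 += src[0][i + {4..7}] * filter[0]
        smlal               v3.4s, v25.4h, v6.h[1]          // val0 += src[1][i + {0..3}] * filter[1]
        smlal2              v4.4s, v25.8h, v6.h[1]          // val1 += src[1][i + {4..7}] * filter[1]
        ...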

+6:
+        mov                 v3.16B, v1.16B                  // initialize accumulator part 1 with dithering value
+        mov                 v4.16B, v2.16B                  // initialize accumulator part 2 with dithering value
+
+        ld1                 {v24.8H}, [x5], #16             // load 8x16-bit values for src[j + 0][i + {0..7}]
+        ld1                 {v25.8H}, [x6], #16             // load 8x16-bit values for src[j + 1][i + {0..7}]
+        ld1                 {v26.8H}, [x7], #16             // load 8x16-bit values for src[j + 2][i + {0..7}]
+        ld1                 {v27.8H}, [x9], #16             // load 8x16-bit values for src[j + 3][i + {0..7}]
+        ld1                 {v28.8H}, [x10], #16            // load 8x16-bit values for src[j + 4][i + {0..7}]
+        ld1                 {v29.8H}, [x11], #16            // load 8x16-bit values for src[j + 5][i + {0..7}]
+        ld1                 {v30.8H}, [x12], #16            // load 8x16-bit values for src[j + 6][i + {0..7}]
+        ld1                 {v31.8H}, [x13], #16            // load 8x16-bit values for src[j + 7][i + {0..7}]
+
+        smlal               v3.4S, v24.4H, v16.4H           // val0 += src[0][i + {0..3}] * filter[0]
+        smlal2              v4.4S, v24.8H, v16.8H           // val1 += src[0][i + {4..7}] * filter[0]
+        smlal               v3.4S, v25.4H, v17.4H           // val0 += src[1][i + {0..3}] * filter[1]
+        smlal2              v4.4S, v25.8H, v17.8H           // val1 += src[1][i + {4..7}] * filter[1]
+        smlal               v3.4S, v26.4H, v18.4H           // val0 += src[2][i + {0..3}] * filter[2]
+        smlal2              v4.4S, v26.8H, v18.8H           // val1 += src[2][i + {4..7}] * filter[2]
+        smlal               v3.4S, v27.4H, v19.4H           // val0 += src[3][i + {0..3}] * filter[3]
+        smlal2              v4.4S, v27.8H, v19.8H           // val1 += src[3][i + {4..7}] * filter[3]
+        smlal               v3.4S, v28.4H, v20.4H           // val0 += src[4][i + {0..3}] * filter[4]
+        smlal2              v4.4S, v28.8H, v20.8H           // val1 += src[4][i + {4..7}] * filter[4]
+        smlal               v3.4S, v29.4H, v21.4H           // val0 += src[5][i + {0..3}] * filter[5]
+        smlal2              v4.4S, v29.8H, v21.8H           // val1 += src[5][i + {4..7}] * filter[5]
+        smlal               v3.4S, v30.4H, v22.4H           // val0 += src[6][i + {0..3}] * filter[6]
+        smlal2              v4.4S, v30.8H, v22.8H           // val1 += src[6][i + {4..7}] * filter[6]
+        smlal               v3.4S, v31.4H, v23.4H           // val0 += src[7][i + {0..3}] * filter[7]
+        smlal2              v4.4S, v31.8H, v23.8H           // val1 += src[7][i + {4..7}] * filter[7]
+
+        sqshrun             v3.4h, v3.4s, #16               // clip16(val0>>16)
+        sqshrun2            v3.8h, v4.4s, #16               // clip16(val1>>16)
+        uqshrn              v3.8b, v3.8h, #3                // clip8(val>>19)
+        st1                 {v3.8b}, [x3], #8               // write to destination
+        subs                w4, w4, #8                      // dstW -= 8
+        b.gt                6b                              // loop until width consumed

For code like this, I'd usually prefer to move the subs instruction somewhere earlier, e.g. between the uqshrn and st1 (where there's a stall anyway); that moves the setting of the condition codes by the subs further away from the b.gt that consumes them.
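
I.e. something like this (untested):

        uqshrn              v3.8b, v3.8h, #3                // clip8(val>>19)
        subs                w4, w4, #8                      // dstW -= 8
        st1                 {v3.8b}, [x3], #8               // write to destination
        b.gt                6b                              // loop until width consumed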

+        ret
+
+7:      // fs=4
+        ldp                 x5, x6, [x2]                    // load 2 pointers: src[j  ] and src[j+1]
+        ldp                 x7, x9, [x2, #16]               // load 2 pointers: src[j+2] and src[j+3]
+
+        // load 4x16-bit values for filter[j], where j=0..3, replicated across lanes
+        ld4r                {v16.8H, v17.8H, v18.8H, v19.8H}, [x0]
+8:
+        mov                 v3.16B, v1.16B                  // initialize accumulator part 1 with dithering value
+        mov                 v4.16B, v2.16B                  // initialize accumulator part 2 with dithering value
+
+        ld1                 {v24.8H}, [x5], #16             // load 8x16-bit values for src[j + 0][i + {0..7}]
+        ld1                 {v25.8H}, [x6], #16             // load 8x16-bit values for src[j + 1][i + {0..7}]
+        ld1                 {v26.8H}, [x7], #16             // load 8x16-bit values for src[j + 2][i + {0..7}]
+        ld1                 {v27.8H}, [x9], #16             // load 8x16-bit values for src[j + 3][i + {0..7}]
+
+        smlal               v3.4S, v24.4H, v16.4H           // val0 += src[0][i + {0..3}] * filter[0]
+        smlal2              v4.4S, v24.8H, v16.8H           // val1 += src[0][i + {4..7}] * filter[0]
+        smlal               v3.4S, v25.4H, v17.4H           // val0 += src[1][i + {0..3}] * filter[1]
+        smlal2              v4.4S, v25.8H, v17.8H           // val1 += src[1][i + {4..7}] * filter[1]
+        smlal               v3.4S, v26.4H, v18.4H           // val0 += src[2][i + {0..3}] * filter[2]
+        smlal2              v4.4S, v26.8H, v18.8H           // val1 += src[2][i + {4..7}] * filter[2]
+        smlal               v3.4S, v27.4H, v19.4H           // val0 += src[3][i + {0..3}] * filter[3]
+        smlal2              v4.4S, v27.8H, v19.8H           // val1 += src[3][i + {4..7}] * filter[3]
+
+        sqshrun             v3.4h, v3.4s, #16               // clip16(val0>>16)
+        sqshrun2            v3.8h, v4.4s, #16               // clip16(val1>>16)
+        uqshrn              v3.8b, v3.8h, #3                // clip8(val>>19)
+        st1                 {v3.8b}, [x3], #8               // write to destination
+        subs                w4, w4, #8                      // dstW -= 8
+        b.gt                8b                              // loop until width consumed
+        ret
+
+9:      // fs=2
+        ldp                 x5, x6, [x2]                    // load 2 pointers: src[j  ] and src[j+1]
+
+        // load 2x16-bit values for filter[j], where j=0..1, replicated across lanes
+        ld2r                {v16.8H, v17.8H}, [x0]
+10:
+        mov                 v3.16B, v1.16B                  // initialize accumulator part 1 with dithering value
+        mov                 v4.16B, v2.16B                  // initialize accumulator part 2 with dithering value
+
+        ld1                 {v24.8H}, [x5], #16             // load 8x16-bit values for src[j + 0][i + {0..7}]
+        ld1                 {v25.8H}, [x6], #16             // load 8x16-bit values for src[j + 1][i + {0..7}]
+
+        smlal               v3.4S, v24.4H, v16.4H           // val0 += src[0][i + {0..3}] * filter[0]
+        smlal2              v4.4S, v24.8H, v16.8H           // val1 += src[0][i + {4..7}] * filter[0]
+        smlal               v3.4S, v25.4H, v17.4H           // val0 += src[1][i + {0..3}] * filter[1]
+        smlal2              v4.4S, v25.8H, v17.8H           // val1 += src[1][i + {4..7}] * filter[1]
+
+        sqshrun             v3.4h, v3.4s, #16               // clip16(val0>>16)
+        sqshrun2            v3.8h, v4.4s, #16               // clip16(val1>>16)
+        uqshrn              v3.8b, v3.8h, #3                // clip8(val>>19)
+        st1                 {v3.8b}, [x3], #8               // write to destination
+        subs                w4, w4, #8                      // dstW -= 8
+        b.gt                10b                             // loop until width consumed
+        ret
+endfunc
+function ff_yuv2plane1_8_neon, export=1

Nitpick: It'd be nice to have an empty line before this new function.

+        ld1                 {v0.8B}, [x5]                   // load 8x8-bit dither
+        cbz                 w6, 1f                          // check if offsetting present
+        ext                 v0.8B, v0.8B, v0.8B, #3         // honor offsetting which can be 0 or 3 only
+1:      uxtl                v0.8H, v0.8B                    // extend dither to 16-bit
+
+
+2:
+        ld1                 {v5.8H}, [x0], #16              // read 8x16-bit @ src[i + {0..7}]: A,B,C,D,E,F,G,H
+        add                 v3.8H, v0.8H, v5.8H             // val = src + dither
+
+        uqshrn              v3.8b, v3.8h, #7                // clip8(val>>7)
+        st1                 {v3.8b}, [x1], #8               // write to destination
+        subs                w2, w2, #8                      // dstW -= 8
+        b.gt                2b                              // loop until width consumed
+        ret
endfunc

Same thing about placement of subs here; I'd prefer to move it up a line (or maybe even directly after the ld1?).
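
I.e. hypothetically (untested; the subs only touches general purpose registers, so it can be hoisted past the vector ops freely):

2:      ld1                 {v5.8H}, [x0], #16              // read 8x16-bit source values
        subs                w2, w2, #8                      // dstW -= 8, set flags well before b.gt
        add                 v3.8H, v0.8H, v5.8H             // val = src + dither
        uqshrn              v3.8b, v3.8h, #7                // clip8(val>>7)
        st1                 {v3.8b}, [x1], #8               // write to destination
        b.gt                2b                              // loop until width consumed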

diff --git a/libswscale/aarch64/swscale.c b/libswscale/aarch64/swscale.c
index 2ea4ccb3a6..0cc821bf11 100644
--- a/libswscale/aarch64/swscale.c
+++ b/libswscale/aarch64/swscale.c
@@ -40,6 +40,12 @@ ALL_SCALE_FUNCS(neon);
void ff_yuv2planeX_8_neon(const int16_t *filter, int filterSize,
                          const int16_t **src, uint8_t *dest, int dstW,
                          const uint8_t *dither, int offset);
+void ff_yuv2plane1_8_neon(
+        const int16_t *src,
+        uint8_t *dest,
+        int dstW,
+        const uint8_t *dither,
+        int offset);

#define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt) do {              \
    if (c->srcBpc == 8 && c->dstBpc <= 14) {                            \
@@ -56,6 +62,11 @@ void ff_yuv2planeX_8_neon(const int16_t *filter, int filterSize,
               ASSIGN_SCALE_FUNC2(hscalefn, X8, opt);                   \
           break;                                                       \
  }
+#define ASSIGN_VSCALE_FUNC(vscalefn, opt1)                              \
+    switch(c->dstBpc){                                                  \
+    case 8: vscalefn = ff_yuv2plane1_8_  ## opt1;  break;               \
+    default: break;                                                     \
+    }

Why "opt1" and not just "opt"? Please add spaces between parentheses and breaces.

Overall this patch looks quite straightforward, without anything significant to add, but I'd like the tests extended to cover the new function, and the existing tests extended to cover the filterSize=2 case too.

// Martin
