On Mon, 25 Jul 2022, Hubert Mazur wrote:

Provide an optimized implementation of the pix_abs8 function for arm64.

Performance comparison tests are shown below.
- pix_abs_1_0_c: 105.2
- pix_abs_1_0_neon: 21.4
- sad_1_c: 107.2
- sad_1_neon: 20.9

Benchmarks and tests were run with the checkasm tool on AWS Graviton 3.
---
libavcodec/aarch64/me_cmp_init_aarch64.c |  4 ++
libavcodec/aarch64/me_cmp_neon.S         | 53 ++++++++++++++++++++++++
2 files changed, 57 insertions(+)

diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c
index 89c817990c..7d7dc38754 100644
--- a/libavcodec/aarch64/me_cmp_init_aarch64.c
+++ b/libavcodec/aarch64/me_cmp_init_aarch64.c
@@ -31,6 +31,8 @@ int ff_pix_abs16_x2_neon(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
int ff_pix_abs16_y2_neon(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
+int ff_pix_abs8_neon(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
+                      ptrdiff_t stride, int h);

int sse16_neon(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  ptrdiff_t stride, int h);
@@ -48,8 +50,10 @@ av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
        c->pix_abs[0][1] = ff_pix_abs16_x2_neon;
        c->pix_abs[0][2] = ff_pix_abs16_y2_neon;
        c->pix_abs[0][3] = ff_pix_abs16_xy2_neon;
+        c->pix_abs[1][0] = ff_pix_abs8_neon;

        c->sad[0] = ff_pix_abs16_neon;
+        c->sad[1] = ff_pix_abs8_neon;
        c->sse[0] = sse16_neon;
        c->sse[1] = sse8_neon;
        c->sse[2] = sse4_neon;
diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S
index dcaffc9b73..f2dd63ced1 100644
--- a/libavcodec/aarch64/me_cmp_neon.S
+++ b/libavcodec/aarch64/me_cmp_neon.S
@@ -72,6 +72,59 @@ function ff_pix_abs16_neon, export=1
        ret
endfunc

+function ff_pix_abs8_neon, export=1
+        // x0           unused
+        // x1           uint8_t *pix1
+        // x2           uint8_t *pix2
+        // x3           ptrdiff_t stride
+        // x4           int h
+
+        movi            d18, #0
+        cmp             w4, #4
+        b.lt            2f
+
+// make 4 iterations at once
+1:
+        ld1             {v0.8b}, [x1], x3               // Load pix1 for first iteration
+        ld1             {v1.8b}, [x2], x3               // Load pix2 for first iteration
+        uabdl           v30.8h, v0.8b, v1.8b            // Absolute difference, first iteration
+        ld1             {v2.8b}, [x1], x3               // Load pix1 for second iteration
+        ld1             {v3.8b}, [x2], x3               // Load pix2 for second iteration
+        uabal           v30.8h, v2.8b, v3.8b            // Absolute difference, second iteration
+        ld1             {v4.8b}, [x1], x3               // Load pix1 for third iteration
+        ld1             {v5.8b}, [x2], x3               // Load pix2 for third iteration
+        uabal           v30.8h, v4.8b, v5.8b            // Absolute difference, third iteration
+        ld1             {v6.8b}, [x1], x3               // Load pix1 for fourth iteration
+        ld1             {v7.8b}, [x2], x3               // Load pix2 for fourth iteration
+        uabal           v30.8h, v6.8b, v7.8b            // Absolute difference, fourth iteration

This is maybe the simplest example so far: the unrolled version is just 4 identical serial copies of the same set of 3 instructions. That may help a bit on some CPUs, but it doesn't help nearly as much as it could on others, where better unrolling would pay off.

I.e., same comments as for the other patches: improve the interleaving, and don't do uaddlv once per iteration.
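
Something like this (untested, just a sketch; the register choice and the h % 4 tail handling are placeholders) is what I have in mind: keep two independent accumulator chains, interleave the loads with the uabal instructions, and do the horizontal reduction only once after the loop:

        // untested sketch, not the actual patch: two accumulator chains,
        // loads interleaved with the arithmetic, single reduction at the end
        movi            v16.8h, #0              // accumulator, chain 1
        movi            v17.8h, #0              // accumulator, chain 2
1:
        ld1             {v0.8b}, [x1], x3       // pix1, row 0
        ld1             {v1.8b}, [x2], x3       // pix2, row 0
        ld1             {v2.8b}, [x1], x3       // pix1, row 1
        ld1             {v3.8b}, [x2], x3       // pix2, row 1
        uabal           v16.8h, v0.8b, v1.8b    // row 0 |pix1 - pix2| into chain 1
        ld1             {v4.8b}, [x1], x3       // pix1, row 2
        ld1             {v5.8b}, [x2], x3       // pix2, row 2
        uabal           v17.8h, v2.8b, v3.8b    // row 1 into chain 2
        ld1             {v6.8b}, [x1], x3       // pix1, row 3
        ld1             {v7.8b}, [x2], x3       // pix2, row 3
        uabal           v16.8h, v4.8b, v5.8b    // row 2 into chain 1
        sub             w4, w4, #4
        uabal           v17.8h, v6.8b, v7.8b    // row 3 into chain 2
        cmp             w4, #4
        b.ge            1b
        // ... handle the remaining h % 4 rows here, accumulating into v16/v17 ...
        add             v16.8h, v16.8h, v17.8h  // merge the two chains
        uaddlv          s0, v16.8h              // one horizontal add, after the loop
        fmov            w0, s0
        ret

The two chains break up the serial dependency on a single accumulator, and keeping the sum in a vector register means the comparatively slow cross-lane uaddlv only runs once instead of every iteration.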

// Martin
