From 2f516e0236bd84d78ce6fd7e55c4b1a3c9d99baa Mon Sep 17 00:00:00 2001
From: sunyuechi <sunyue...@iscas.ac.cn>
Date: Sat, 20 Apr 2024 23:32:10 +0800
Subject: [PATCH 1/3] lavc/vp8dsp: R-V V loop_filter_simple

C908:
vp8_loop_filter_simple_h_c: 416.0
vp8_loop_filter_simple_h_rvv_i32: 187.5
vp8_loop_filter_simple_v_c: 429.7
vp8_loop_filter_simple_v_rvv_i32: 104.0
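
For reference, a scalar sketch of the filter being vectorized, modelled
on simple_limit()/filter_common() in libavcodec/vp8dsp.c and RFC 6386
(the per-pixel helper name and signature here are illustrative only):

    /* av_clip_*()/FFABS() as in libavutil/common.h. */
    static void simple_filter_px(uint8_t *p, ptrdiff_t stride, int flim)
    {
        int p1 = p[-2 * stride], p0 = p[-stride];
        int q0 = p[0],           q1 = p[ stride];

        /* vp8_simple_limit(): filter only where the edge is flat enough */
        if (2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim) {
            int a  = av_clip_int8(3 * (q0 - p0) + av_clip_int8(p1 - q1));
            int f1 = av_clip_int8(a + 4) >> 3;
            int f2 = av_clip_int8(a + 3) >> 3;
            p[0]       = av_clip_uint8(q0 - f1);  /* q0 -= f1 */
            p[-stride] = av_clip_uint8(p0 + f2);  /* p0 += f2 */
        }
    }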
---
 libavcodec/riscv/vp8dsp_init.c |   5 ++
 libavcodec/riscv/vp8dsp_rvv.S  | 105 +++++++++++++++++++++++++++++++++
 2 files changed, 110 insertions(+)
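
Note: the asm avoids a second int8 clip after the +3/+4 bias by
pre-capping a with vmin.vx (124 and 123). A quick way to check that this
matches av_clip_int8(), given that a has already been clipped to int8
(FFMIN/av_clip_int8 as in libavutil; snippet is illustrative):

    for (int a = -128; a <= 127; a++) {
        assert(av_clip_int8(a + 3) == FFMIN(a, 124) + 3);
        assert(av_clip_int8(a + 4) == FFMIN(a, 123) + 4);
    }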

diff --git a/libavcodec/riscv/vp8dsp_init.c b/libavcodec/riscv/vp8dsp_init.c
index 2dd583d079..46ca71ed04 100644
--- a/libavcodec/riscv/vp8dsp_init.c
+++ b/libavcodec/riscv/vp8dsp_init.c
@@ -38,6 +38,8 @@ VP8_BILIN(16, rvv);
 VP8_BILIN(8,  rvv);
 VP8_BILIN(4,  rvv);
 
+VP8_LF(rvv);
+
 av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c)
 {
 #if HAVE_RVV
@@ -120,6 +122,9 @@ av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
         if (flags & AV_CPU_FLAG_RVB_ADDR) {
             c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_rvv;
         }
+
+        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_rvv;
+        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_rvv;
     }
 #endif
 }
diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
index ba644f0f47..2eadfc5766 100644
--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S
@@ -72,6 +72,111 @@ func ff_vp8_idct_dc_add4uv_rvv, zve32x
         ret
 endfunc
 
+.macro filter_fmin len a f1 p0f2 q0f1
+.ifc \len,16
+        vsetvli         zero, zero, e16, m2, ta, ma
+.else
+        vsetvli         zero, zero, e16, m1, ta, ma
+.endif
+        vsext.vf2       \q0f1, \a                // widen clipped a to 16 bit
+        vmin.vx         \p0f2, \q0f1, a7         // a7 = 124: keep a + 3 within int8
+        vmin.vx         \q0f1, \q0f1, t3         // t3 = 123: keep a + 4 within int8
+        vadd.vi         \p0f2, \p0f2, 3
+        vadd.vi         \q0f1, \q0f1, 4
+        vsra.vi         \p0f2, \p0f2, 3          // f2 = av_clip_int8(a + 3) >> 3
+        vsra.vi         \f1,   \q0f1, 3          // f1 = av_clip_int8(a + 4) >> 3
+        vadd.vv         \p0f2, \p0f2, v8         // p0 + f2
+        vsub.vv         \q0f1, v16, \f1          // q0 - f1
+        vmax.vx         \p0f2, \p0f2, zero       // clamp low; vnclipu caps at 255 later
+        vmax.vx         \q0f1, \q0f1, zero
+.endm
+
+.macro filter len type normal inner dst stride fE fI thresh // normal, inner, fI, thresh: unused by the simple filter
+.ifc \type,v
+        slli            a6, \stride, 1
+        sub             t2, \dst, a6
+        add             t4, \dst, \stride
+        sub             t1, \dst, \stride
+        vle8.v          v1, (t2)                 // p1 = dst[-2 * stride]
+        vle8.v          v11, (t4)                // q1 = dst[stride]
+        vle8.v          v17, (t1)                // p0 = dst[-stride]
+        vle8.v          v22, (\dst)              // q0 = dst[0]
+.else
+        addi            t1, \dst, -1
+        addi            a6, \dst, -2
+        addi            t4, \dst, 1
+        vlse8.v         v1, (a6), \stride        // p1 = dst[-2]
+        vlse8.v         v11, (t4), \stride       // q1 = dst[1]
+        vlse8.v         v17, (t1), \stride       // p0 = dst[-1]
+        vlse8.v         v22, (\dst), \stride     // q0 = dst[0]
+.endif
+        vwsubu.vv       v12, v1, v11             // p1-q1
+        vwsubu.vv       v24, v22, v17            // q0-p0
+        vnclip.wi       v23, v12, 0              // av_clip_int8(p1 - q1)
+
+.ifc \len,16
+        vsetvli         zero, zero, e16, m2, ta, ma
+.else
+        vsetvli         zero, zero, e16, m1, ta, ma
+.endif
+
+        // vp8_simple_limit(dst + i, stride, flim): 2*|p0-q0| + (|p1-q1|>>1) <= flim
+        li              a7, 2
+        vneg.v          v18, v12
+        vmax.vv         v18, v18, v12            // |p1 - q1|
+        vneg.v          v8, v24
+        vmax.vv         v8, v8, v24              // |q0 - p0|
+        vsrl.vi         v18, v18, 1              // |p1 - q1| >> 1
+        vmacc.vx        v18, a7, v8              // += 2 * |q0 - p0|
+        vmsleu.vx       v0, v18, \fE             // v0 = mask of pixels to filter
+
+        li              t5, 3
+        li              a7, 124                  // clip bound for a + 3 (filter_fmin)
+        li              t3, 123                  // clip bound for a + 4 (filter_fmin)
+        vsext.vf2       v4, v23                  // av_clip_int8(p1 - q1), widened
+        vzext.vf2       v8, v17                  // p0
+        vzext.vf2       v16, v22                 // q0
+        vmul.vx         v30, v24, t5             // 3 * (q0 - p0)
+        vadd.vv         v12, v30, v4             // a = 3 * (q0 - p0) + clip(p1 - q1)
+
+.ifc \len,16
+        vsetvli         zero, zero, e8, m1, ta, ma
+.else
+        vsetvli         zero, zero, e8, mf2, ta, ma
+.endif
+        vnclip.wi       v11, v12, 0              // a = av_clip_int8(a)
+        filter_fmin     \len v11 v24 v4 v6       // v4 = p0 + f2, v6 = q0 - f1
+
+.ifc \len,16
+        vsetvli         zero, zero, e8, m1, ta, ma
+.else
+        vsetvli         zero, zero, e8, mf2, ta, ma
+.endif
+        vnclipu.wi      v4, v4, 0                // av_clip_uint8(p0 + f2)
+        vnclipu.wi      v6, v6, 0                // av_clip_uint8(q0 - f1)
+
+.ifc \type,v
+        vse8.v          v4, (t1), v0.t           // new p0, only where mask set
+        vse8.v          v6, (\dst), v0.t         // new q0, only where mask set
+.else
+        vsse8.v         v4, (t1), \stride, v0.t
+        vsse8.v         v6, (\dst), \stride, v0.t
+.endif
+
+.endm
+
+func ff_vp8_v_loop_filter16_simple_rvv, zve32x
+        vsetivli        zero, 16, e8, m1, ta, ma
+        filter 16 v 0 0 a0 a1 a2 a3 a4
+        ret
+endfunc
+
+func ff_vp8_h_loop_filter16_simple_rvv, zve32x
+        vsetivli        zero, 16, e8, m1, ta, ma
+        filter 16 h 0 0 a0 a1 a2 a3 a4
+        ret
+endfunc
+
 .macro put_vp8_pixels
 1:
         addi          a4, a4, -1
-- 
2.44.0
