ffmpeg | branch: master | James Almer <jamr...@gmail.com> | Tue Oct 24 19:10:22 
2017 -0300| [c0683dce89eceaab8783b8b47dd2346afc0a9276] | committer: James Almer

Merge commit '0b9a237b2386ff84a6f99716bd58fa27a1b767e7'

* commit '0b9a237b2386ff84a6f99716bd58fa27a1b767e7':
  hevc: Add NEON 4x4 and 8x8 IDCT

[15:12:59] <@ubitux> hevc_idct_4x4_8_c: 389.1
[15:13:00] <@ubitux> hevc_idct_4x4_8_neon: 126.6
[15:13:02] <@ubitux> our ^
[15:13:06] <@ubitux> hevc_idct_4x4_8_c: 389.3
[15:13:08] <@ubitux> hevc_idct_4x4_8_neon: 107.8
[15:13:10] <@ubitux> hevc_idct_4x4_10_c: 418.6
[15:13:12] <@ubitux> hevc_idct_4x4_10_neon: 108.1
[15:13:14] <@ubitux> libav ^
[15:13:30] <@ubitux> so yeah, we can probably trash our versions here

Merged-by: James Almer <jamr...@gmail.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=c0683dce89eceaab8783b8b47dd2346afc0a9276
---

 libavcodec/arm/hevcdsp_arm.h       |   2 +-
 libavcodec/arm/hevcdsp_idct_neon.S | 430 +++++++++++++++++--------------------
 libavcodec/arm/hevcdsp_init_arm.c  |   6 +-
 libavcodec/arm/hevcdsp_init_neon.c |  17 +-
 libavcodec/hevcdsp.c               |   4 +-
 libavcodec/hevcdsp.h               |   3 +-
 6 files changed, 221 insertions(+), 241 deletions(-)

diff --git a/libavcodec/arm/hevcdsp_arm.h b/libavcodec/arm/hevcdsp_arm.h
index 7735df9cd2..47cdfa574d 100644
--- a/libavcodec/arm/hevcdsp_arm.h
+++ b/libavcodec/arm/hevcdsp_arm.h
@@ -21,6 +21,6 @@
 
 #include "libavcodec/hevcdsp.h"
 
-void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth);
+void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth);
 
 #endif /* AVCODEC_ARM_HEVCDSP_ARM_H */
diff --git a/libavcodec/arm/hevcdsp_idct_neon.S 
b/libavcodec/arm/hevcdsp_idct_neon.S
index e39d00634b..627b9b4750 100644
--- a/libavcodec/arm/hevcdsp_idct_neon.S
+++ b/libavcodec/arm/hevcdsp_idct_neon.S
@@ -1,5 +1,7 @@
 /*
+ * ARM NEON optimised IDCT functions for HEVC decoding
  * Copyright (c) 2014 Seppo Tomperi <seppo.tomp...@vtt.fi>
+ * Copyright (c) 2017 Alexandra Hájková
  *
  * This file is part of FFmpeg.
  *
@@ -19,7 +21,13 @@
  */
 
 #include "libavutil/arm/asm.S"
-#include "neon.S"
+
+const trans, align=4
+        .short 64, 83, 64, 36
+        .short 89, 75, 50, 18
+        .short 90, 87, 80, 70
+        .short 57, 43, 25, 9
+endconst
 
 function ff_hevc_idct_4x4_dc_neon_8, export=1
         ldrsh       r1, [r0]
@@ -168,30 +176,6 @@ function ff_hevc_add_residual_32x32_neon_8, export=1
         bx          lr
 endfunc
 
-.macro  transpose_16b_8x8   r0, r1, r2, r3, r4, r5, r6, r7
-        vtrn.64         \r0, \r4
-        vtrn.64         \r1, \r5
-        vtrn.64         \r2, \r6
-        vtrn.64         \r3, \r7
-        vtrn.32         \r0, \r2
-        vtrn.32         \r1, \r3
-        vtrn.32         \r4, \r6
-        vtrn.32         \r5, \r7
-        vtrn.16         \r0, \r1
-        vtrn.16         \r2, \r3
-        vtrn.16         \r4, \r5
-        vtrn.16         \r6, \r7
-.endm
-
-// in 4 q regs
-// output 8 d regs
-.macro transpose_16b_4x4    r0, r1, r2, r3
-        vtrn.32         \r0, \r2
-        vtrn.32         \r1, \r3
-        vtrn.16         \r0, \r1
-        vtrn.16         \r2, \r3
-.endm
-
 /* uses registers q2 - q9 for temp values */
 /* TODO: reorder */
 .macro tr4_luma_shift r0, r1, r2, r3, shift
@@ -225,67 +209,6 @@ endfunc
         vqrshrn.s32   \r3, q5, \shift
 .endm
 
-/* uses registers q2 - q6 for temp values */
-.macro tr4 r0, r1, r2, r3
-        vmull.s16  q4, \r1, d0[0]   // 83 * src1
-        vmull.s16  q6, \r1, d0[1]   // 36 * src1
-        vshll.s16  q2, \r0, #6   // 64 * src0
-        vshll.s16  q3, \r2, #6   // 64 * src2
-        vadd.s32   q5, q2, q3    // 64 * (src0 + src2)     e0
-        vsub.s32   q2, q2, q3    // 64 * (src0 - src2)     e1
-        vmlal.s16  q4, \r3, d0[1]   // 83 * src1 + 36 * src3  o0
-        vmlsl.s16  q6, \r3, d0[0]   // 36 * src1 - 83 * src3  o1
-
-        vsub.s32   q3, q5, q4    // e0 - o0
-        vadd.s32   q4, q5, q4    // e0 + o0
-        vadd.s32   q5, q2, q6    // e1 + o1
-        vsub.s32   q6, q2, q6    // e1 - o1
-.endm
-
-.macro tr4_shift r0, r1, r2, r3, shift
-        vmull.s16  q4, \r1, d0[0]   // 83 * src1
-        vmull.s16  q6, \r1, d0[1]   // 36 * src1
-        vshll.s16  q2, \r0, #6   // 64 * src0
-        vshll.s16  q3, \r2, #6   // 64 * src2
-        vadd.s32   q5, q2, q3    // 64 * (src0 + src2)     e0
-        vsub.s32   q2, q2, q3    // 64 * (src0 - src2)     e1
-        vmlal.s16  q4, \r3, d0[1]   // 83 * src1 + 36 * src3  o0
-        vmlsl.s16  q6, \r3, d0[0]   // 36 * src1 - 83 * src3  o1
-
-        vsub.s32   q3, q5, q4    // e0 - o0
-        vadd.s32   q4, q5, q4    // e0 + o0
-        vadd.s32   q5, q2, q6    // e1 + o1
-        vsub.s32   q6, q2, q6    // e1 - o1
-
-        vqrshrn.s32   \r0, q4, \shift
-        vqrshrn.s32   \r1, q5, \shift
-        vqrshrn.s32   \r2, q6, \shift
-        vqrshrn.s32   \r3, q3, \shift
-.endm
-
-function ff_hevc_transform_4x4_neon_8, export=1
-        vpush       {d8-d15}
-        vld1.16     {q14, q15}, [r0]  // coeffs
-        ldr         r3, =0x00240053 // 36 and 83
-        vmov.32     d0[0], r3
-
-        tr4_shift d28, d29, d30, d31, #7
-
-        vtrn.16     d28, d29
-        vtrn.16     d30, d31
-        vtrn.32     q14, q15
-
-        tr4_shift d28, d29, d30, d31, #12
-
-        vtrn.16     d28, d29
-        vtrn.16     d30, d31
-        vtrn.32     q14, q15
-
-        vst1.16     {q14, q15}, [r0]
-        vpop        {d8-d15}
-        bx lr
-endfunc
-
 function ff_hevc_transform_luma_4x4_neon_8, export=1
         vpush       {d8-d15}
         vld1.16     {q14, q15}, [r0]  // coeffs
@@ -312,154 +235,201 @@ function ff_hevc_transform_luma_4x4_neon_8, export=1
         bx lr
 endfunc
 
-.macro tr8_begin in0, in1, in2, in3
-        vmull.s16  q7, \in0, d1[1]   // 89 * src1
-        vmull.s16  q8, \in0, d1[0]   // 75 * src1
-        vmull.s16  q9, \in0, d1[3]   // 50 * src1
-        vmull.s16  q10, \in0, d1[2]  // 18 * src1
-
-        vmlal.s16  q7, \in1, d1[0]   // 75 * src3
-        vmlsl.s16  q8, \in1, d1[2]   //-18 * src3
-        vmlsl.s16  q9, \in1, d1[1]   //-89 * src3
-        vmlsl.s16  q10, \in1, d1[3]  //-50 * src3
-
-        vmlal.s16  q7, \in2, d1[3]   // 50 * src5
-        vmlsl.s16  q8, \in2, d1[1]   //-89 * src5
-        vmlal.s16  q9, \in2, d1[2]   // 18 * src5
-        vmlal.s16  q10, \in2, d1[0]  // 75 * src5
-
-        vmlal.s16  q7, \in3, d1[2]   // 18 * src7
-        vmlsl.s16  q8, \in3, d1[3]   //-50 * src7
-        vmlal.s16  q9, \in3, d1[0]   // 75 * src7
-        vmlsl.s16  q10, \in3, d1[1]  //-89 * src7
+.macro sum_sub out, in, c, op
+  .ifc \op, +
+        vmlal.s16       \out, \in, \c
+  .else
+        vmlsl.s16       \out, \in, \c
+  .endif
+.endm
+
+.macro tr_4x4 in0, in1, in2, in3, out0, out1, out2, out3, shift, tmp0, tmp1, 
tmp2, tmp3, tmp4
+         vshll.s16      \tmp0, \in0, #6
+         vmull.s16      \tmp2, \in1, d4[1]
+         vmov           \tmp1, \tmp0
+         vmull.s16      \tmp3, \in1, d4[3]
+         vmlal.s16      \tmp0, \in2, d4[0] @e0
+         vmlsl.s16      \tmp1, \in2, d4[0] @e1
+         vmlal.s16      \tmp2, \in3, d4[3] @o0
+         vmlsl.s16      \tmp3, \in3, d4[1] @o1
+
+         vadd.s32       \tmp4, \tmp0, \tmp2
+         vsub.s32       \tmp0, \tmp0, \tmp2
+         vadd.s32       \tmp2, \tmp1, \tmp3
+         vsub.s32       \tmp1, \tmp1, \tmp3
+         vqrshrn.s32    \out0, \tmp4, #\shift
+         vqrshrn.s32    \out3, \tmp0, #\shift
+         vqrshrn.s32    \out1, \tmp2, #\shift
+         vqrshrn.s32    \out2, \tmp1, #\shift
+.endm
+
+.macro tr_4x4_8 in0, in1, in2, in3, out0, out1, out2, out3, tmp0, tmp1, tmp2, 
tmp3
+         vshll.s16      \tmp0, \in0, #6
+         vld1.s16       {\in0}, [r1, :64]!
+         vmov           \tmp1, \tmp0
+         vmull.s16      \tmp2, \in1, \in0[1]
+         vmull.s16      \tmp3, \in1, \in0[3]
+         vmlal.s16      \tmp0, \in2, \in0[0] @e0
+         vmlsl.s16      \tmp1, \in2, \in0[0] @e1
+         vmlal.s16      \tmp2, \in3, \in0[3] @o0
+         vmlsl.s16      \tmp3, \in3, \in0[1] @o1
+
+         vld1.s16       {\in0}, [r1, :64]
+
+         vadd.s32       \out0, \tmp0, \tmp2
+         vadd.s32       \out1, \tmp1, \tmp3
+         vsub.s32       \out2, \tmp1, \tmp3
+         vsub.s32       \out3, \tmp0, \tmp2
+
+         sub            r1,  r1,  #8
 .endm
 
-.macro tr8_end shift
-        vadd.s32   q1, q4, q7   //  e_8[0] + o_8[0], dst[0]
-        vsub.s32   q4, q4, q7   //  e_8[0] - o_8[0], dst[7]
-
-        vadd.s32   q2, q5, q8   // e_8[1] + o_8[1], dst[1]
-        vsub.s32   q5, q5, q8   // e_8[1] - o_8[1], dst[6]
-
-        vadd.s32   q11, q6, q9  // e_8[2] + o_8[2], dst[2]
-        vsub.s32    q6, q6, q9  // e_8[2] - o_8[2], dst[5]
-
-        vadd.s32   q12, q3, q10 // e_8[3] + o_8[3], dst[3]
-        vsub.s32   q3, q3, q10  // e_8[3] - o_8[3], dst[4]
-        vqrshrn.s32   d2, q1, \shift
-        vqrshrn.s32   d3, q2, \shift
-        vqrshrn.s32   d4, q11, \shift
-        vqrshrn.s32   d5, q12, \shift
-        vqrshrn.s32   d6, q3, \shift
-        vqrshrn.s32   d7, q6, \shift
-        vqrshrn.s32   d9, q4, \shift
-        vqrshrn.s32   d8, q5, \shift
+@ Do a 4x4 transpose, using q registers for the subtransposes that don't
+@ need to address the individual d registers.
+@ r0,r1 == rq0, r2,r3 == rq1
+.macro transpose_4x4 rq0, rq1, r0, r1, r2, r3
+        vtrn.32         \rq0, \rq1
+        vtrn.16         \r0,  \r1
+        vtrn.16         \r2,  \r3
 .endm
 
-function ff_hevc_transform_8x8_neon_8, export=1
-        push   {r4-r8}
-        vpush {d8-d15}
-        mov    r5, #16
-
-        adr       r3, tr4f
-        vld1.16   {d0, d1}, [r3]
-
-        // left half
-        vld1.16 {d24}, [r0], r5
-        vld1.16 {d25}, [r0], r5
-        vld1.16 {d26}, [r0], r5
-        vld1.16 {d27}, [r0], r5
-        vld1.16 {d28}, [r0], r5
-        vld1.16 {d29}, [r0], r5
-        vld1.16 {d30}, [r0], r5
-        vld1.16 {d31}, [r0], r5
-        sub      r0, #128
-        tr8_begin d25, d27, d29, d31
-        tr4       d24, d26, d28, d30
-        tr8_end   #7
-        vst1.16 {d2}, [r0], r5
-        vst1.16 {d3}, [r0], r5
-        vst1.16 {d4}, [r0], r5
-        vst1.16 {d5}, [r0], r5
-        vst1.16 {d6}, [r0], r5
-        vst1.16 {d7}, [r0], r5
-        vst1.16 {d8}, [r0], r5
-        vst1.16 {d9}, [r0], r5
-        sub      r0, #128
-        //skip right half if col_limit in r1 is less than 4
-        cmp      r1, #4
-        blt      1f
-        //right half
-        add      r0, #8
-        vld1.16 {d24}, [r0], r5
-        vld1.16 {d25}, [r0], r5
-        vld1.16 {d26}, [r0], r5
-        vld1.16 {d27}, [r0], r5
-        vld1.16 {d28}, [r0], r5
-        vld1.16 {d29}, [r0], r5
-        vld1.16 {d30}, [r0], r5
-        vld1.16 {d31}, [r0], r5
-        sub      r0, #128
-        tr8_begin d25, d27, d29, d31
-        tr4       d24, d26, d28, d30
-        tr8_end   #7
-        vst1.16 {d2}, [r0], r5
-        vst1.16 {d3}, [r0], r5
-        vst1.16 {d4}, [r0], r5
-        vst1.16 {d5}, [r0], r5
-        vst1.16 {d6}, [r0], r5
-        vst1.16 {d7}, [r0], r5
-        vst1.16 {d8}, [r0], r5
-        vst1.16 {d9}, [r0], r5
-        sub      r0, #136
-1:
-        // top half
-        vldm r0, {q12-q15} // coeffs
-        transpose_16b_4x4 d24, d26, d28, d30
-        transpose_16b_4x4 d25, d27, d29, d31
-        tr8_begin d26, d30, d27, d31
-        tr4 d24, d28, d25, d29
-        tr8_end #12
-        transpose_16b_4x4 d2, d3, d4, d5
-        transpose_16b_4x4 d6, d7, d8, d9
-        vswp     d7, d5
-        vswp     d7, d8
-        vswp     d3, d6
-        vswp     d6, d4
-        vstm r0!, {q1-q4}
-
-        // bottom half
-        vldm r0, {q12-q15} // coeffs
-        transpose_16b_4x4 d24, d26, d28, d30
-        transpose_16b_4x4 d25, d27, d29, d31
-        tr8_begin d26, d30, d27, d31
-        tr4 d24, d28, d25, d29
-        tr8_end #12
-        transpose_16b_4x4 d2, d3, d4, d5
-        transpose_16b_4x4 d6, d7, d8, d9
-        vswp     d7, d5
-        vswp     d7, d8
-        vswp     d3, d6
-        vswp     d6, d4
-        //vstm     r0, {q1-q4}
-        vst1.16 {q1-q2}, [r0]
-        add     r0, #32
-        vst1.16 {q3-q4}, [r0]
-        sub     r0, #32
-        vpop {d8-d15}
-        pop {r4-r8}
+.macro idct_4x4 bitdepth
+function ff_hevc_idct_4x4_\bitdepth\()_neon, export=1
+@r0 - coeffs
+        vld1.s16        {q0-q1}, [r0, :128]
+
+        movrel          r1, trans
+        vld1.s16        {d4}, [r1, :64]
+
+        tr_4x4          d0, d1, d2, d3, d16, d17, d18, d19, 7, q10, q11, q12, 
q13, q0
+        transpose_4x4   q8, q9, d16, d17, d18, d19
+
+        tr_4x4          d16, d17, d18, d19, d0, d1, d2, d3, 20 - \bitdepth, 
q10, q11, q12, q13, q0
+        transpose_4x4   q0, q1, d0, d1, d2, d3
+        vst1.s16        {d0-d3}, [r0, :128]
         bx lr
 endfunc
+.endm
+
+.macro transpose8_4x4 r0, r1, r2, r3
+        vtrn.16         \r0,  \r1
+        vtrn.16         \r2,  \r3
+        vtrn.32         \r0,  \r2
+        vtrn.32         \r1,  \r3
+.endm
+
+.macro transpose_8x8 r0, r1, r2, r3, r4, r5, r6, r7, l0, l1, l2, l3, l4, l5, 
l6, l7
+        transpose8_4x4  \r0, \r1, \r2, \r3
+        transpose8_4x4  \r4, \r5, \r6, \r7
+
+        transpose8_4x4  \l0, \l1, \l2, \l3
+        transpose8_4x4  \l4, \l5, \l6, \l7
+.endm
+
+.macro tr_8x4 shift, in0, in1, in2, in3, in4, in5, in6, in7
+        tr_4x4_8        \in0, \in2, \in4, \in6, q8, q9, q10, q11, q12, q13, 
q14, q15
+
+        vmull.s16       q14, \in1, \in0[2]
+        vmull.s16       q12, \in1, \in0[0]
+        vmull.s16       q13, \in1, \in0[1]
+        sum_sub         q14, \in3, \in0[0], -
+        sum_sub         q12, \in3, \in0[1], +
+        sum_sub         q13, \in3, \in0[3], -
+
+        sum_sub         q14, \in5, \in0[3], +
+        sum_sub         q12, \in5, \in0[2], +
+        sum_sub         q13, \in5, \in0[0], -
+
+        sum_sub         q14, \in7, \in0[1], +
+        sum_sub         q12, \in7, \in0[3], +
+        sum_sub         q13, \in7, \in0[2], -
+
+        vadd.s32        q15, q10, q14
+        vsub.s32        q10, q10, q14
+        vqrshrn.s32     \in2, q15, \shift
+
+        vmull.s16       q15, \in1, \in0[3]
+        sum_sub         q15, \in3, \in0[2], -
+        sum_sub         q15, \in5, \in0[1], +
+        sum_sub         q15, \in7, \in0[0], -
+
+        vqrshrn.s32     \in5, q10,  \shift
+
+        vadd.s32        q10, q8, q12
+        vsub.s32        q8,  q8, q12
+        vadd.s32        q12, q9, q13
+        vsub.s32        q9,  q9, q13
+        vadd.s32        q14, q11, q15
+        vsub.s32        q11, q11, q15
+
+        vqrshrn.s32     \in0, q10, \shift
+        vqrshrn.s32     \in7, q8,  \shift
+        vqrshrn.s32     \in1, q12, \shift
+        vqrshrn.s32     \in6, q9,  \shift
+        vqrshrn.s32     \in3, q14, \shift
+        vqrshrn.s32     \in4, q11, \shift
+.endm
+
+.macro idct_8x8 bitdepth
+function ff_hevc_idct_8x8_\bitdepth\()_neon, export=1
+@r0 - coeffs
+        vpush           {q4-q7}
+
+        mov             r1,  r0
+        mov             r2,  #64
+        add             r3,  r0,  #32
+        vld1.s16        {q0-q1}, [r1,:128], r2
+        vld1.s16        {q2-q3}, [r3,:128], r2
+        vld1.s16        {q4-q5}, [r1,:128], r2
+        vld1.s16        {q6-q7}, [r3,:128], r2
+
+        movrel          r1, trans
+
+        tr_8x4          7, d0, d2, d4, d6, d8, d10, d12, d14
+        tr_8x4          7, d1, d3, d5, d7, d9, d11, d13, d15
+
+        @ Transpose each 4x4 block, and swap how d4-d7 and d8-d11 are used.
+        @ Layout before:
+        @ d0  d1
+        @ d2  d3
+        @ d4  d5
+        @ d6  d7
+        @ d8  d9
+        @ d10 d11
+        @ d12 d13
+        @ d14 d15
+        transpose_8x8   d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, 
d11, d13, d15
+        @ Now the layout is:
+        @ d0  d8
+        @ d2  d10
+        @ d4  d12
+        @ d6  d14
+        @ d1  d9
+        @ d3  d11
+        @ d5  d13
+        @ d7  d15
+
+        tr_8x4          20 - \bitdepth, d0, d2, d4, d6, d1, d3, d5, d7
+        vswp            d0, d8
+        tr_8x4          20 - \bitdepth, d0, d10, d12, d14, d9, d11, d13, d15
+        vswp            d0, d8
+
+        transpose_8x8   d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, 
d11, d13, d15
+
+        mov             r1,  r0
+        mov             r2,  #64
+        add             r3,  r0,  #32
+        vst1.s16        {q0-q1}, [r1,:128], r2
+        vst1.s16        {q2-q3}, [r3,:128], r2
+        vst1.s16        {q4-q5}, [r1,:128], r2
+        vst1.s16        {q6-q7}, [r3,:128], r2
+
+        vpop            {q4-q7}
+        bx              lr
+endfunc
+.endm
 
-.align 4
-tr4f:
-.word 0x00240053  // 36 and d1[0] = 83
-.word 0x00000000
-tr8f:
-.word 0x0059004b  // 89, d0[0] = 75
-.word 0x00320012  // 50, d0[2] = 18
-tr16:
-.word 0x005a0057  // 90, d2[0] = 87
-.word 0x00500046  // 80, d2[2] = 70
-.word 0x0039002b  // 57, d2[0] = 43
-.word 0x00190009  // 25, d2[2] = 9
+idct_4x4 8
+idct_4x4 10
+idct_8x8 8
+idct_8x8 10
diff --git a/libavcodec/arm/hevcdsp_init_arm.c 
b/libavcodec/arm/hevcdsp_init_arm.c
index adcc454511..e8fa1f79ac 100644
--- a/libavcodec/arm/hevcdsp_init_arm.c
+++ b/libavcodec/arm/hevcdsp_init_arm.c
@@ -19,14 +19,16 @@
  */
 
 #include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
 #include "libavutil/arm/cpu.h"
+
 #include "libavcodec/hevcdsp.h"
 #include "hevcdsp_arm.h"
 
-av_cold void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth)
+av_cold void ff_hevc_dsp_init_arm(HEVCDSPContext *c, const int bit_depth)
 {
     int cpu_flags = av_get_cpu_flags();
 
     if (have_neon(cpu_flags))
-        ff_hevcdsp_init_neon(c, bit_depth);
+        ff_hevc_dsp_init_neon(c, bit_depth);
 }
diff --git a/libavcodec/arm/hevcdsp_init_neon.c 
b/libavcodec/arm/hevcdsp_init_neon.c
index 1a3912c609..8bc430e012 100644
--- a/libavcodec/arm/hevcdsp_init_neon.c
+++ b/libavcodec/arm/hevcdsp_init_neon.c
@@ -27,8 +27,10 @@ void ff_hevc_v_loop_filter_luma_neon(uint8_t *_pix, 
ptrdiff_t _stride, int _beta
 void ff_hevc_h_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 void ff_hevc_v_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
*_tc, uint8_t *_no_p, uint8_t *_no_q);
 void ff_hevc_h_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
*_tc, uint8_t *_no_p, uint8_t *_no_q);
-void ff_hevc_transform_4x4_neon_8(int16_t *coeffs, int col_limit);
-void ff_hevc_transform_8x8_neon_8(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_4x4_8_neon(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_8x8_8_neon(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_4x4_10_neon(int16_t *coeffs, int col_limit);
+void ff_hevc_idct_8x8_10_neon(int16_t *coeffs, int col_limit);
 void ff_hevc_idct_4x4_dc_neon_8(int16_t *coeffs);
 void ff_hevc_idct_8x8_dc_neon_8(int16_t *coeffs);
 void ff_hevc_idct_16x16_dc_neon_8(int16_t *coeffs);
@@ -142,7 +144,7 @@ void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, 
ptrdiff_t dststride, uint8_t
     put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, 
height, src2, MAX_PB_SIZE);
 }
 
-av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
+av_cold void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth)
 {
     if (bit_depth == 8) {
         int x;
@@ -150,8 +152,8 @@ av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const 
int bit_depth)
         c->hevc_h_loop_filter_luma     = ff_hevc_h_loop_filter_luma_neon;
         c->hevc_v_loop_filter_chroma   = ff_hevc_v_loop_filter_chroma_neon;
         c->hevc_h_loop_filter_chroma   = ff_hevc_h_loop_filter_chroma_neon;
-        c->idct[0]                     = ff_hevc_transform_4x4_neon_8;
-        c->idct[1]                     = ff_hevc_transform_8x8_neon_8;
+        c->idct[0]                     = ff_hevc_idct_4x4_8_neon;
+        c->idct[1]                     = ff_hevc_idct_8x8_8_neon;
         c->idct_dc[0]                  = ff_hevc_idct_4x4_dc_neon_8;
         c->idct_dc[1]                  = ff_hevc_idct_8x8_dc_neon_8;
         c->idct_dc[2]                  = ff_hevc_idct_16x16_dc_neon_8;
@@ -221,4 +223,9 @@ av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const 
int bit_depth)
         c->put_hevc_qpel_uni[8][0][0]  = ff_hevc_put_qpel_uw_pixels_w48_neon_8;
         c->put_hevc_qpel_uni[9][0][0]  = ff_hevc_put_qpel_uw_pixels_w64_neon_8;
     }
+
+    if (bit_depth == 10) {
+        c->idct[0] = ff_hevc_idct_4x4_10_neon;
+        c->idct[1] = ff_hevc_idct_8x8_10_neon;
+    }
 }
diff --git a/libavcodec/hevcdsp.c b/libavcodec/hevcdsp.c
index e432aa3cf9..957e40d5ff 100644
--- a/libavcodec/hevcdsp.c
+++ b/libavcodec/hevcdsp.c
@@ -257,12 +257,12 @@ int i = 0;
         break;
     }
 
+    if (ARCH_ARM)
+        ff_hevc_dsp_init_arm(hevcdsp, bit_depth);
     if (ARCH_PPC)
         ff_hevc_dsp_init_ppc(hevcdsp, bit_depth);
     if (ARCH_X86)
         ff_hevc_dsp_init_x86(hevcdsp, bit_depth);
-    if (ARCH_ARM)
-        ff_hevcdsp_init_arm(hevcdsp, bit_depth);
     if (ARCH_MIPS)
         ff_hevc_dsp_init_mips(hevcdsp, bit_depth);
 }
diff --git a/libavcodec/hevcdsp.h b/libavcodec/hevcdsp.h
index dc48ebca11..0ae67cba85 100644
--- a/libavcodec/hevcdsp.h
+++ b/libavcodec/hevcdsp.h
@@ -127,8 +127,9 @@ void ff_hevc_dsp_init(HEVCDSPContext *hpc, int bit_depth);
 extern const int8_t ff_hevc_epel_filters[7][4];
 extern const int8_t ff_hevc_qpel_filters[3][16];
 
+void ff_hevc_dsp_init_arm(HEVCDSPContext *c, const int bit_depth);
 void ff_hevc_dsp_init_ppc(HEVCDSPContext *c, const int bit_depth);
 void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth);
-void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth);
 void ff_hevc_dsp_init_mips(HEVCDSPContext *c, const int bit_depth);
+
 #endif /* AVCODEC_HEVCDSP_H */


======================================================================

diff --cc libavcodec/arm/hevcdsp_arm.h
index 7735df9cd2,0000000000..47cdfa574d
mode 100644,000000..100644
--- a/libavcodec/arm/hevcdsp_arm.h
+++ b/libavcodec/arm/hevcdsp_arm.h
@@@ -1,26 -1,0 +1,26 @@@
 +/*
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
USA
 + */
 +
 +#ifndef AVCODEC_ARM_HEVCDSP_ARM_H
 +#define AVCODEC_ARM_HEVCDSP_ARM_H
 +
 +#include "libavcodec/hevcdsp.h"
 +
- void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth);
++void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth);
 +
 +#endif /* AVCODEC_ARM_HEVCDSP_ARM_H */
diff --cc libavcodec/arm/hevcdsp_idct_neon.S
index e39d00634b,0000000000..627b9b4750
mode 100644,000000..100644
--- a/libavcodec/arm/hevcdsp_idct_neon.S
+++ b/libavcodec/arm/hevcdsp_idct_neon.S
@@@ -1,465 -1,0 +1,435 @@@
 +/*
++ * ARM NEON optimised IDCT functions for HEVC decoding
 + * Copyright (c) 2014 Seppo Tomperi <seppo.tomp...@vtt.fi>
++ * Copyright (c) 2017 Alexandra Hájková
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
USA
 + */
 +
 +#include "libavutil/arm/asm.S"
- #include "neon.S"
++
++const trans, align=4
++        .short 64, 83, 64, 36
++        .short 89, 75, 50, 18
++        .short 90, 87, 80, 70
++        .short 57, 43, 25, 9
++endconst
 +
 +function ff_hevc_idct_4x4_dc_neon_8, export=1
 +        ldrsh       r1, [r0]
 +        ldr         r2, =0x20
 +        add         r1, #1
 +        asr         r1, #1
 +        add         r1, r2
 +        asr         r1, #6
 +        vdup.16     q0, r1
 +        vdup.16     q1, r1
 +        vst1.16     {q0, q1}, [r0]
 +        bx lr
 +endfunc
 +
 +function ff_hevc_idct_8x8_dc_neon_8, export=1
 +        ldrsh       r1, [r0]
 +        ldr         r2, =0x20
 +        add         r1, #1
 +        asr         r1, #1
 +        add         r1, r2
 +        asr         r1, #6
 +        vdup.16     q8, r1
 +        vdup.16     q9, r1
 +        vmov.16     q10, q8
 +        vmov.16     q11, q8
 +        vmov.16     q12, q8
 +        vmov.16     q13, q8
 +        vmov.16     q14, q8
 +        vmov.16     q15, q8
 +        vstm        r0, {q8-q15}
 +        bx lr
 +endfunc
 +
 +function ff_hevc_idct_16x16_dc_neon_8, export=1
 +        ldrsh       r1, [r0]
 +        ldr         r2, =0x20
 +        add         r1, #1
 +        asr         r1, #1
 +        add         r1, r2
 +        asr         r1, #6
 +        vdup.16     q8, r1
 +        vdup.16     q9, r1
 +        vmov.16     q10, q8
 +        vmov.16     q11, q8
 +        vmov.16     q12, q8
 +        vmov.16     q13, q8
 +        vmov.16     q14, q8
 +        vmov.16     q15, q8
 +        vstm        r0!, {q8-q15}
 +        vstm        r0!, {q8-q15}
 +        vstm        r0!, {q8-q15}
 +        vstm        r0, {q8-q15}
 +        bx lr
 +endfunc
 +
 +function ff_hevc_idct_32x32_dc_neon_8, export=1
 +        ldrsh       r1, [r0]
 +        ldr         r2, =0x20
 +        add         r1, #1
 +        asr         r1, #1
 +        add         r1, r2
 +        asr         r1, #6
 +        mov         r3, #16
 +        vdup.16     q8, r1
 +        vdup.16     q9, r1
 +        vmov.16     q10, q8
 +        vmov.16     q11, q8
 +        vmov.16     q12, q8
 +        vmov.16     q13, q8
 +        vmov.16     q14, q8
 +        vmov.16     q15, q8
 +1:      subs        r3, #1
 +        vstm        r0!, {q8-q15}
 +        bne         1b
 +        bx lr
 +endfunc
 +
 +function ff_hevc_add_residual_4x4_neon_8, export=1
 +        vldm        r1, {q0-q1}
 +        vld1.32     d4[0], [r0], r2
 +        vld1.32     d4[1], [r0], r2
 +        vld1.32     d5[0], [r0], r2
 +        vld1.32     d5[1], [r0], r2
 +        sub         r0, r0, r2, lsl #2
 +        vmovl.u8    q8, d4
 +        vmovl.u8    q9, d5
 +        vqadd.s16   q0, q0, q8
 +        vqadd.s16   q1, q1, q9
 +        vqmovun.s16 d0, q0
 +        vqmovun.s16 d1, q1
 +        vst1.32     d0[0], [r0], r2
 +        vst1.32     d0[1], [r0], r2
 +        vst1.32     d1[0], [r0], r2
 +        vst1.32     d1[1], [r0], r2
 +        bx          lr
 +endfunc
 +
 +function ff_hevc_add_residual_8x8_neon_8, export=1
 +        mov         r3,   #8
 +1:      subs        r3,   #1
 +        vld1.16     {q0}, [r1]!
 +        vld1.8      d16,  [r0]
 +        vmovl.u8    q8,   d16
 +        vqadd.s16   q0,   q8
 +        vqmovun.s16 d0,   q0
 +        vst1.32     d0,   [r0], r2
 +        bne         1b
 +        bx          lr
 +endfunc
 +
 +function ff_hevc_add_residual_16x16_neon_8, export=1
 +        mov         r3,   #16
 +1:      subs        r3,   #1
 +        vld1.16     {q0, q1}, [r1]!
 +        vld1.8      {q8},  [r0]
 +        vmovl.u8    q9,  d16
 +        vmovl.u8    q10, d17
 +        vqadd.s16   q0,  q9
 +        vqadd.s16   q1,  q10
 +        vqmovun.s16 d0,  q0
 +        vqmovun.s16 d1,  q1
 +        vst1.8      {q0},   [r0], r2
 +        bne         1b
 +        bx          lr
 +endfunc
 +
 +function ff_hevc_add_residual_32x32_neon_8, export=1
 +        mov         r3,   #32
 +1:      subs        r3,   #1
 +        vldm        r1!, {q0-q3}
 +        vld1.8      {q8, q9},  [r0]
 +        vmovl.u8    q10, d16
 +        vmovl.u8    q11, d17
 +        vmovl.u8    q12, d18
 +        vmovl.u8    q13, d19
 +        vqadd.s16   q0,  q10
 +        vqadd.s16   q1,  q11
 +        vqadd.s16   q2,  q12
 +        vqadd.s16   q3,  q13
 +        vqmovun.s16 d0,  q0
 +        vqmovun.s16 d1,  q1
 +        vqmovun.s16 d2,  q2
 +        vqmovun.s16 d3,  q3
 +        vst1.8     {q0, q1},   [r0], r2
 +        bne         1b
 +        bx          lr
 +endfunc
 +
- .macro  transpose_16b_8x8   r0, r1, r2, r3, r4, r5, r6, r7
-         vtrn.64         \r0, \r4
-         vtrn.64         \r1, \r5
-         vtrn.64         \r2, \r6
-         vtrn.64         \r3, \r7
-         vtrn.32         \r0, \r2
-         vtrn.32         \r1, \r3
-         vtrn.32         \r4, \r6
-         vtrn.32         \r5, \r7
-         vtrn.16         \r0, \r1
-         vtrn.16         \r2, \r3
-         vtrn.16         \r4, \r5
-         vtrn.16         \r6, \r7
- .endm
- 
- // in 4 q regs
- // output 8 d regs
- .macro transpose_16b_4x4    r0, r1, r2, r3
-         vtrn.32         \r0, \r2
-         vtrn.32         \r1, \r3
-         vtrn.16         \r0, \r1
-         vtrn.16         \r2, \r3
- .endm
- 
 +/* uses registers q2 - q9 for temp values */
 +/* TODO: reorder */
 +.macro tr4_luma_shift r0, r1, r2, r3, shift
 +        vaddl.s16   q5, \r0, \r2    // c0 = src0 + src2
 +        vaddl.s16   q2, \r2, \r3    // c1 = src2 + src3
 +        vsubl.s16   q4, \r0, \r3    // c2 = src0 - src3
 +        vmull.s16   q6, \r1, d0[0]  // c3 = 74 * src1
 +
 +        vaddl.s16   q7, \r0, \r3    // src0 + src3
 +        vsubw.s16   q7, q7, \r2     // src0 - src2 + src3
 +        vmul.s32    q7, q7, d0[0]   // dst2 = 74 * (src0 - src2 + src3)
 +
 +        vmul.s32    q8, q5, d0[1]   // 29 * c0
 +        vmul.s32    q9, q2, d1[0]   // 55 * c1
 +        vadd.s32    q8, q9          // 29 * c0 + 55 * c1
 +        vadd.s32    q8, q6          // dst0 = 29 * c0 + 55 * c1 + c3
 +
 +        vmul.s32    q2, q2, d0[1]   // 29 * c1
 +        vmul.s32    q9, q4, d1[0]   // 55 * c2
 +        vsub.s32    q9, q2          // 55 * c2 - 29 * c1
 +        vadd.s32    q9, q6          // dst1 = 55 * c2 - 29 * c1 + c3
 +
 +        vmul.s32    q5, q5, d1[0]   // 55 * c0
 +        vmul.s32    q4, q4, d0[1]   // 29 * c2
 +        vadd.s32    q5, q4          // 55 * c0 + 29 * c2
 +        vsub.s32    q5, q6          // dst3 = 55 * c0 + 29 * c2 - c3
 +
 +        vqrshrn.s32   \r0, q8, \shift
 +        vqrshrn.s32   \r1, q9, \shift
 +        vqrshrn.s32   \r2, q7, \shift
 +        vqrshrn.s32   \r3, q5, \shift
 +.endm
 +
- /* uses registers q2 - q6 for temp values */
- .macro tr4 r0, r1, r2, r3
-         vmull.s16  q4, \r1, d0[0]   // 83 * src1
-         vmull.s16  q6, \r1, d0[1]   // 36 * src1
-         vshll.s16  q2, \r0, #6   // 64 * src0
-         vshll.s16  q3, \r2, #6   // 64 * src2
-         vadd.s32   q5, q2, q3    // 64 * (src0 + src2)     e0
-         vsub.s32   q2, q2, q3    // 64 * (src0 - src2)     e1
-         vmlal.s16  q4, \r3, d0[1]   // 83 * src1 + 36 * src3  o0
-         vmlsl.s16  q6, \r3, d0[0]   // 36 * src1 - 83 * src3  o1
- 
-         vsub.s32   q3, q5, q4    // e0 - o0
-         vadd.s32   q4, q5, q4    // e0 + o0
-         vadd.s32   q5, q2, q6    // e1 + o1
-         vsub.s32   q6, q2, q6    // e1 - o1
- .endm
- 
- .macro tr4_shift r0, r1, r2, r3, shift
-         vmull.s16  q4, \r1, d0[0]   // 83 * src1
-         vmull.s16  q6, \r1, d0[1]   // 36 * src1
-         vshll.s16  q2, \r0, #6   // 64 * src0
-         vshll.s16  q3, \r2, #6   // 64 * src2
-         vadd.s32   q5, q2, q3    // 64 * (src0 + src2)     e0
-         vsub.s32   q2, q2, q3    // 64 * (src0 - src2)     e1
-         vmlal.s16  q4, \r3, d0[1]   // 83 * src1 + 36 * src3  o0
-         vmlsl.s16  q6, \r3, d0[0]   // 36 * src1 - 83 * src3  o1
- 
-         vsub.s32   q3, q5, q4    // e0 - o0
-         vadd.s32   q4, q5, q4    // e0 + o0
-         vadd.s32   q5, q2, q6    // e1 + o1
-         vsub.s32   q6, q2, q6    // e1 - o1
- 
-         vqrshrn.s32   \r0, q4, \shift
-         vqrshrn.s32   \r1, q5, \shift
-         vqrshrn.s32   \r2, q6, \shift
-         vqrshrn.s32   \r3, q3, \shift
- .endm
- 
- function ff_hevc_transform_4x4_neon_8, export=1
-         vpush       {d8-d15}
-         vld1.16     {q14, q15}, [r0]  // coeffs
-         ldr         r3, =0x00240053 // 36 and 83
-         vmov.32     d0[0], r3
- 
-         tr4_shift d28, d29, d30, d31, #7
- 
-         vtrn.16     d28, d29
-         vtrn.16     d30, d31
-         vtrn.32     q14, q15
- 
-         tr4_shift d28, d29, d30, d31, #12
- 
-         vtrn.16     d28, d29
-         vtrn.16     d30, d31
-         vtrn.32     q14, q15
- 
-         vst1.16     {q14, q15}, [r0]
-         vpop        {d8-d15}
-         bx lr
- endfunc
- 
 +function ff_hevc_transform_luma_4x4_neon_8, export=1
 +        vpush       {d8-d15}
 +        vld1.16     {q14, q15}, [r0]  // coeffs
 +        ldr         r3, =0x4a  // 74
 +        vmov.32     d0[0], r3
 +        ldr         r3, =0x1d  // 29
 +        vmov.32     d0[1], r3
 +        ldr         r3, =0x37  // 55
 +        vmov.32     d1[0], r3
 +
 +        tr4_luma_shift d28, d29, d30, d31, #7
 +
 +        vtrn.16     d28, d29
 +        vtrn.16     d30, d31
 +        vtrn.32     q14, q15
 +
 +        tr4_luma_shift d28, d29, d30, d31, #12
 +
 +        vtrn.16     d28, d29
 +        vtrn.16     d30, d31
 +        vtrn.32     q14, q15
 +        vst1.16     {q14, q15}, [r0]
 +        vpop        {d8-d15}
 +        bx lr
 +endfunc
 +
- .macro tr8_begin in0, in1, in2, in3
-         vmull.s16  q7, \in0, d1[1]   // 89 * src1
-         vmull.s16  q8, \in0, d1[0]   // 75 * src1
-         vmull.s16  q9, \in0, d1[3]   // 50 * src1
-         vmull.s16  q10, \in0, d1[2]  // 18 * src1
- 
-         vmlal.s16  q7, \in1, d1[0]   // 75 * src3
-         vmlsl.s16  q8, \in1, d1[2]   //-18 * src3
-         vmlsl.s16  q9, \in1, d1[1]   //-89 * src3
-         vmlsl.s16  q10, \in1, d1[3]  //-50 * src3
- 
-         vmlal.s16  q7, \in2, d1[3]   // 50 * src5
-         vmlsl.s16  q8, \in2, d1[1]   //-89 * src5
-         vmlal.s16  q9, \in2, d1[2]   // 18 * src5
-         vmlal.s16  q10, \in2, d1[0]  // 75 * src5
- 
-         vmlal.s16  q7, \in3, d1[2]   // 18 * src7
-         vmlsl.s16  q8, \in3, d1[3]   //-50 * src7
-         vmlal.s16  q9, \in3, d1[0]   // 75 * src7
-         vmlsl.s16  q10, \in3, d1[1]  //-89 * src7
++.macro sum_sub out, in, c, op
++  .ifc \op, +
++        vmlal.s16       \out, \in, \c
++  .else
++        vmlsl.s16       \out, \in, \c
++  .endif
++.endm
++
++.macro tr_4x4 in0, in1, in2, in3, out0, out1, out2, out3, shift, tmp0, tmp1, tmp2, tmp3, tmp4
++         vshll.s16      \tmp0, \in0, #6
++         vmull.s16      \tmp2, \in1, d4[1]
++         vmov           \tmp1, \tmp0
++         vmull.s16      \tmp3, \in1, d4[3]
++         vmlal.s16      \tmp0, \in2, d4[0] @e0
++         vmlsl.s16      \tmp1, \in2, d4[0] @e1
++         vmlal.s16      \tmp2, \in3, d4[3] @o0
++         vmlsl.s16      \tmp3, \in3, d4[1] @o1
++
++         vadd.s32       \tmp4, \tmp0, \tmp2
++         vsub.s32       \tmp0, \tmp0, \tmp2
++         vadd.s32       \tmp2, \tmp1, \tmp3
++         vsub.s32       \tmp1, \tmp1, \tmp3
++         vqrshrn.s32    \out0, \tmp4, #\shift
++         vqrshrn.s32    \out3, \tmp0, #\shift
++         vqrshrn.s32    \out1, \tmp2, #\shift
++         vqrshrn.s32    \out2, \tmp1, #\shift
++.endm
++
++.macro tr_4x4_8 in0, in1, in2, in3, out0, out1, out2, out3, tmp0, tmp1, tmp2, tmp3
++         vshll.s16      \tmp0, \in0, #6
++         vld1.s16       {\in0}, [r1, :64]!
++         vmov           \tmp1, \tmp0
++         vmull.s16      \tmp2, \in1, \in0[1]
++         vmull.s16      \tmp3, \in1, \in0[3]
++         vmlal.s16      \tmp0, \in2, \in0[0] @e0
++         vmlsl.s16      \tmp1, \in2, \in0[0] @e1
++         vmlal.s16      \tmp2, \in3, \in0[3] @o0
++         vmlsl.s16      \tmp3, \in3, \in0[1] @o1
++
++         vld1.s16       {\in0}, [r1, :64]
++
++         vadd.s32       \out0, \tmp0, \tmp2
++         vadd.s32       \out1, \tmp1, \tmp3
++         vsub.s32       \out2, \tmp1, \tmp3
++         vsub.s32       \out3, \tmp0, \tmp2
++
++         sub            r1,  r1,  #8
 +.endm
 +
- .macro tr8_end shift
-         vadd.s32   q1, q4, q7   //  e_8[0] + o_8[0], dst[0]
-         vsub.s32   q4, q4, q7   //  e_8[0] - o_8[0], dst[7]
- 
-         vadd.s32   q2, q5, q8   // e_8[1] + o_8[1], dst[1]
-         vsub.s32   q5, q5, q8   // e_8[1] - o_8[1], dst[6]
- 
-         vadd.s32   q11, q6, q9  // e_8[2] + o_8[2], dst[2]
-         vsub.s32    q6, q6, q9  // e_8[2] - o_8[2], dst[5]
- 
-         vadd.s32   q12, q3, q10 // e_8[3] + o_8[3], dst[3]
-         vsub.s32   q3, q3, q10  // e_8[3] - o_8[3], dst[4]
-         vqrshrn.s32   d2, q1, \shift
-         vqrshrn.s32   d3, q2, \shift
-         vqrshrn.s32   d4, q11, \shift
-         vqrshrn.s32   d5, q12, \shift
-         vqrshrn.s32   d6, q3, \shift
-         vqrshrn.s32   d7, q6, \shift
-         vqrshrn.s32   d9, q4, \shift
-         vqrshrn.s32   d8, q5, \shift
++@ Do a 4x4 transpose, using q registers for the subtransposes that don't
++@ need to address the individual d registers.
++@ r0,r1 == rq0, r2,r3 == rq1
++.macro transpose_4x4 rq0, rq1, r0, r1, r2, r3
++        vtrn.32         \rq0, \rq1
++        vtrn.16         \r0,  \r1
++        vtrn.16         \r2,  \r3
 +.endm
 +
- function ff_hevc_transform_8x8_neon_8, export=1
-         push   {r4-r8}
-         vpush {d8-d15}
-         mov    r5, #16
- 
-         adr       r3, tr4f
-         vld1.16   {d0, d1}, [r3]
- 
-         // left half
-         vld1.16 {d24}, [r0], r5
-         vld1.16 {d25}, [r0], r5
-         vld1.16 {d26}, [r0], r5
-         vld1.16 {d27}, [r0], r5
-         vld1.16 {d28}, [r0], r5
-         vld1.16 {d29}, [r0], r5
-         vld1.16 {d30}, [r0], r5
-         vld1.16 {d31}, [r0], r5
-         sub      r0, #128
-         tr8_begin d25, d27, d29, d31
-         tr4       d24, d26, d28, d30
-         tr8_end   #7
-         vst1.16 {d2}, [r0], r5
-         vst1.16 {d3}, [r0], r5
-         vst1.16 {d4}, [r0], r5
-         vst1.16 {d5}, [r0], r5
-         vst1.16 {d6}, [r0], r5
-         vst1.16 {d7}, [r0], r5
-         vst1.16 {d8}, [r0], r5
-         vst1.16 {d9}, [r0], r5
-         sub      r0, #128
-         //skip right half if col_limit in r1 is less than 4
-         cmp      r1, #4
-         blt      1f
-         //right half
-         add      r0, #8
-         vld1.16 {d24}, [r0], r5
-         vld1.16 {d25}, [r0], r5
-         vld1.16 {d26}, [r0], r5
-         vld1.16 {d27}, [r0], r5
-         vld1.16 {d28}, [r0], r5
-         vld1.16 {d29}, [r0], r5
-         vld1.16 {d30}, [r0], r5
-         vld1.16 {d31}, [r0], r5
-         sub      r0, #128
-         tr8_begin d25, d27, d29, d31
-         tr4       d24, d26, d28, d30
-         tr8_end   #7
-         vst1.16 {d2}, [r0], r5
-         vst1.16 {d3}, [r0], r5
-         vst1.16 {d4}, [r0], r5
-         vst1.16 {d5}, [r0], r5
-         vst1.16 {d6}, [r0], r5
-         vst1.16 {d7}, [r0], r5
-         vst1.16 {d8}, [r0], r5
-         vst1.16 {d9}, [r0], r5
-         sub      r0, #136
- 1:
-         // top half
-         vldm r0, {q12-q15} // coeffs
-         transpose_16b_4x4 d24, d26, d28, d30
-         transpose_16b_4x4 d25, d27, d29, d31
-         tr8_begin d26, d30, d27, d31
-         tr4 d24, d28, d25, d29
-         tr8_end #12
-         transpose_16b_4x4 d2, d3, d4, d5
-         transpose_16b_4x4 d6, d7, d8, d9
-         vswp     d7, d5
-         vswp     d7, d8
-         vswp     d3, d6
-         vswp     d6, d4
-         vstm r0!, {q1-q4}
- 
-         // bottom half
-         vldm r0, {q12-q15} // coeffs
-         transpose_16b_4x4 d24, d26, d28, d30
-         transpose_16b_4x4 d25, d27, d29, d31
-         tr8_begin d26, d30, d27, d31
-         tr4 d24, d28, d25, d29
-         tr8_end #12
-         transpose_16b_4x4 d2, d3, d4, d5
-         transpose_16b_4x4 d6, d7, d8, d9
-         vswp     d7, d5
-         vswp     d7, d8
-         vswp     d3, d6
-         vswp     d6, d4
-         //vstm     r0, {q1-q4}
-         vst1.16 {q1-q2}, [r0]
-         add     r0, #32
-         vst1.16 {q3-q4}, [r0]
-         sub     r0, #32
-         vpop {d8-d15}
-         pop {r4-r8}
++.macro idct_4x4 bitdepth
++function ff_hevc_idct_4x4_\bitdepth\()_neon, export=1
++@r0 - coeffs
++        vld1.s16        {q0-q1}, [r0, :128]
++
++        movrel          r1, trans
++        vld1.s16        {d4}, [r1, :64]
++
++        tr_4x4          d0, d1, d2, d3, d16, d17, d18, d19, 7, q10, q11, q12, q13, q0
++        transpose_4x4   q8, q9, d16, d17, d18, d19
++
++        tr_4x4          d16, d17, d18, d19, d0, d1, d2, d3, 20 - \bitdepth, q10, q11, q12, q13, q0
++        transpose_4x4   q0, q1, d0, d1, d2, d3
++        vst1.s16        {d0-d3}, [r0, :128]
 +        bx lr
 +endfunc
++.endm
++
++.macro transpose8_4x4 r0, r1, r2, r3
++        vtrn.16         \r0,  \r1
++        vtrn.16         \r2,  \r3
++        vtrn.32         \r0,  \r2
++        vtrn.32         \r1,  \r3
++.endm
++
++.macro transpose_8x8 r0, r1, r2, r3, r4, r5, r6, r7, l0, l1, l2, l3, l4, l5, l6, l7
++        transpose8_4x4  \r0, \r1, \r2, \r3
++        transpose8_4x4  \r4, \r5, \r6, \r7
++
++        transpose8_4x4  \l0, \l1, \l2, \l3
++        transpose8_4x4  \l4, \l5, \l6, \l7
++.endm
++
++.macro tr_8x4 shift, in0, in1, in2, in3, in4, in5, in6, in7
++        tr_4x4_8        \in0, \in2, \in4, \in6, q8, q9, q10, q11, q12, q13, q14, q15
++
++        vmull.s16       q14, \in1, \in0[2]
++        vmull.s16       q12, \in1, \in0[0]
++        vmull.s16       q13, \in1, \in0[1]
++        sum_sub         q14, \in3, \in0[0], -
++        sum_sub         q12, \in3, \in0[1], +
++        sum_sub         q13, \in3, \in0[3], -
++
++        sum_sub         q14, \in5, \in0[3], +
++        sum_sub         q12, \in5, \in0[2], +
++        sum_sub         q13, \in5, \in0[0], -
++
++        sum_sub         q14, \in7, \in0[1], +
++        sum_sub         q12, \in7, \in0[3], +
++        sum_sub         q13, \in7, \in0[2], -
++
++        vadd.s32        q15, q10, q14
++        vsub.s32        q10, q10, q14
++        vqrshrn.s32     \in2, q15, \shift
++
++        vmull.s16       q15, \in1, \in0[3]
++        sum_sub         q15, \in3, \in0[2], -
++        sum_sub         q15, \in5, \in0[1], +
++        sum_sub         q15, \in7, \in0[0], -
++
++        vqrshrn.s32     \in5, q10,  \shift
++
++        vadd.s32        q10, q8, q12
++        vsub.s32        q8,  q8, q12
++        vadd.s32        q12, q9, q13
++        vsub.s32        q9,  q9, q13
++        vadd.s32        q14, q11, q15
++        vsub.s32        q11, q11, q15
++
++        vqrshrn.s32     \in0, q10, \shift
++        vqrshrn.s32     \in7, q8,  \shift
++        vqrshrn.s32     \in1, q12, \shift
++        vqrshrn.s32     \in6, q9,  \shift
++        vqrshrn.s32     \in3, q14, \shift
++        vqrshrn.s32     \in4, q11, \shift
++.endm
++
++.macro idct_8x8 bitdepth
++function ff_hevc_idct_8x8_\bitdepth\()_neon, export=1
++@r0 - coeffs
++        vpush           {q4-q7}
++
++        mov             r1,  r0
++        mov             r2,  #64
++        add             r3,  r0,  #32
++        vld1.s16        {q0-q1}, [r1,:128], r2
++        vld1.s16        {q2-q3}, [r3,:128], r2
++        vld1.s16        {q4-q5}, [r1,:128], r2
++        vld1.s16        {q6-q7}, [r3,:128], r2
++
++        movrel          r1, trans
++
++        tr_8x4          7, d0, d2, d4, d6, d8, d10, d12, d14
++        tr_8x4          7, d1, d3, d5, d7, d9, d11, d13, d15
++
++        @ Transpose each 4x4 block, and swap how d4-d7 and d8-d11 are used.
++        @ Layout before:
++        @ d0  d1
++        @ d2  d3
++        @ d4  d5
++        @ d6  d7
++        @ d8  d9
++        @ d10 d11
++        @ d12 d13
++        @ d14 d15
++        transpose_8x8   d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, d11, d13, d15
++        @ Now the layout is:
++        @ d0  d8
++        @ d2  d10
++        @ d4  d12
++        @ d6  d14
++        @ d1  d9
++        @ d3  d11
++        @ d5  d13
++        @ d7  d15
++
++        tr_8x4          20 - \bitdepth, d0, d2, d4, d6, d1, d3, d5, d7
++        vswp            d0, d8
++        tr_8x4          20 - \bitdepth, d0, d10, d12, d14, d9, d11, d13, d15
++        vswp            d0, d8
++
++        transpose_8x8   d0, d2, d4, d6, d8, d10, d12, d14, d1, d3, d5, d7, d9, d11, d13, d15
++
++        mov             r1,  r0
++        mov             r2,  #64
++        add             r3,  r0,  #32
++        vst1.s16        {q0-q1}, [r1,:128], r2
++        vst1.s16        {q2-q3}, [r3,:128], r2
++        vst1.s16        {q4-q5}, [r1,:128], r2
++        vst1.s16        {q6-q7}, [r3,:128], r2
++
++        vpop            {q4-q7}
++        bx              lr
++endfunc
++.endm
 +
- .align 4
- tr4f:
- .word 0x00240053  // 36 and d1[0] = 83
- .word 0x00000000
- tr8f:
- .word 0x0059004b  // 89, d0[0] = 75
- .word 0x00320012  // 50, d0[2] = 18
- tr16:
- .word 0x005a0057  // 90, d2[0] = 87
- .word 0x00500046  // 80, d2[2] = 70
- .word 0x0039002b  // 57, d2[0] = 43
- .word 0x00190009  // 25, d2[2] = 9
++idct_4x4 8
++idct_4x4 10
++idct_8x8 8
++idct_8x8 10
diff --cc libavcodec/arm/hevcdsp_init_arm.c
index adcc454511,1e984e6188..e8fa1f79ac
--- a/libavcodec/arm/hevcdsp_init_arm.c
+++ b/libavcodec/arm/hevcdsp_init_arm.c
@@@ -19,14 -20,28 +19,16 @@@
   */
  
  #include "libavutil/attributes.h"
+ #include "libavutil/cpu.h"
  #include "libavutil/arm/cpu.h"
+ 
  #include "libavcodec/hevcdsp.h"
 +#include "hevcdsp_arm.h"
  
- av_cold void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth)
 -void ff_hevc_idct_4x4_8_neon(int16_t *coeffs, int col_limit);
 -void ff_hevc_idct_8x8_8_neon(int16_t *coeffs, int col_limit);
 -void ff_hevc_idct_4x4_10_neon(int16_t *coeffs, int col_limit);
 -void ff_hevc_idct_8x8_10_neon(int16_t *coeffs, int col_limit);
 -
 -av_cold void ff_hevc_dsp_init_arm(HEVCDSPContext *c, int bit_depth)
++av_cold void ff_hevc_dsp_init_arm(HEVCDSPContext *c, const int bit_depth)
  {
      int cpu_flags = av_get_cpu_flags();
  
 -    if (have_neon(cpu_flags)) {
 -        if (bit_depth == 8) {
 -            c->idct[0] = ff_hevc_idct_4x4_8_neon;
 -            c->idct[1] = ff_hevc_idct_8x8_8_neon;
 -        }
 -        if (bit_depth == 10) {
 -            c->idct[0] = ff_hevc_idct_4x4_10_neon;
 -            c->idct[1] = ff_hevc_idct_8x8_10_neon;
 -        }
 -    }
 +    if (have_neon(cpu_flags))
-         ff_hevcdsp_init_neon(c, bit_depth);
++        ff_hevc_dsp_init_neon(c, bit_depth);
  }
diff --cc libavcodec/arm/hevcdsp_init_neon.c
index 1a3912c609,0000000000..8bc430e012
mode 100644,000000..100644
--- a/libavcodec/arm/hevcdsp_init_neon.c
+++ b/libavcodec/arm/hevcdsp_init_neon.c
@@@ -1,224 -1,0 +1,231 @@@
 +/*
 + * Copyright (c) 2014 Seppo Tomperi <seppo.tomp...@vtt.fi>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
USA
 + */
 +
 +#include "libavutil/attributes.h"
 +#include "libavutil/arm/cpu.h"
 +#include "libavcodec/hevcdsp.h"
 +#include "hevcdsp_arm.h"
 +
 +void ff_hevc_v_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 +void ff_hevc_h_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 +void ff_hevc_v_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
*_tc, uint8_t *_no_p, uint8_t *_no_q);
 +void ff_hevc_h_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
*_tc, uint8_t *_no_p, uint8_t *_no_q);
- void ff_hevc_transform_4x4_neon_8(int16_t *coeffs, int col_limit);
- void ff_hevc_transform_8x8_neon_8(int16_t *coeffs, int col_limit);
++void ff_hevc_idct_4x4_8_neon(int16_t *coeffs, int col_limit);
++void ff_hevc_idct_8x8_8_neon(int16_t *coeffs, int col_limit);
++void ff_hevc_idct_4x4_10_neon(int16_t *coeffs, int col_limit);
++void ff_hevc_idct_8x8_10_neon(int16_t *coeffs, int col_limit);
 +void ff_hevc_idct_4x4_dc_neon_8(int16_t *coeffs);
 +void ff_hevc_idct_8x8_dc_neon_8(int16_t *coeffs);
 +void ff_hevc_idct_16x16_dc_neon_8(int16_t *coeffs);
 +void ff_hevc_idct_32x32_dc_neon_8(int16_t *coeffs);
 +void ff_hevc_transform_luma_4x4_neon_8(int16_t *coeffs);
 +void ff_hevc_add_residual_4x4_neon_8(uint8_t *_dst, int16_t *coeffs,
 +                                     ptrdiff_t stride);
 +void ff_hevc_add_residual_8x8_neon_8(uint8_t *_dst, int16_t *coeffs,
 +                                     ptrdiff_t stride);
 +void ff_hevc_add_residual_16x16_neon_8(uint8_t *_dst, int16_t *coeffs,
 +                                       ptrdiff_t stride);
 +void ff_hevc_add_residual_32x32_neon_8(uint8_t *_dst, int16_t *coeffs,
 +                                       ptrdiff_t stride);
 +
 +#define PUT_PIXELS(name) \
 +    void name(int16_t *dst, uint8_t *src, \
 +                                ptrdiff_t srcstride, int height, \
 +                                intptr_t mx, intptr_t my, int width)
 +PUT_PIXELS(ff_hevc_put_pixels_w2_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w4_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w6_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w8_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w12_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w16_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w24_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w32_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w48_neon_8);
 +PUT_PIXELS(ff_hevc_put_pixels_w64_neon_8);
 +#undef PUT_PIXELS
 +
 +static void (*put_hevc_qpel_neon[4][4])(int16_t *dst, ptrdiff_t dststride, 
uint8_t *src, ptrdiff_t srcstride,
 +                                   int height, int width);
 +static void (*put_hevc_qpel_uw_neon[4][4])(uint8_t *dst, ptrdiff_t dststride, 
uint8_t *_src, ptrdiff_t _srcstride,
 +                                   int width, int height, int16_t* src2, 
ptrdiff_t src2stride);
 +void ff_hevc_put_qpel_neon_wrapper(int16_t *dst, uint8_t *src, ptrdiff_t 
srcstride,
 +                                   int height, intptr_t mx, intptr_t my, int 
width);
 +void ff_hevc_put_qpel_uni_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, 
uint8_t *src, ptrdiff_t srcstride,
 +                                   int height, intptr_t mx, intptr_t my, int 
width);
 +void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, 
uint8_t *src, ptrdiff_t srcstride,
 +                                       int16_t *src2,
 +                                       int height, intptr_t mx, intptr_t my, 
int width);
 +#define QPEL_FUNC(name) \
 +    void name(int16_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t 
srcstride, \
 +                                   int height, int width)
 +
 +QPEL_FUNC(ff_hevc_put_qpel_v1_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_v2_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_v3_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h1_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h2_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h3_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h1v1_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h1v2_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h1v3_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h2v1_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h2v2_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h2v3_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h3v1_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h3v2_neon_8);
 +QPEL_FUNC(ff_hevc_put_qpel_h3v3_neon_8);
 +#undef QPEL_FUNC
 +
 +#define QPEL_FUNC_UW_PIX(name) \
 +    void name(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t 
_srcstride, \
 +                                   int height, intptr_t mx, intptr_t my, int 
width);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w4_neon_8);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w8_neon_8);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w16_neon_8);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w24_neon_8);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w32_neon_8);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w48_neon_8);
 +QPEL_FUNC_UW_PIX(ff_hevc_put_qpel_uw_pixels_w64_neon_8);
 +#undef QPEL_FUNC_UW_PIX
 +
 +#define QPEL_FUNC_UW(name) \
 +    void name(uint8_t *dst, ptrdiff_t dststride, uint8_t *_src, ptrdiff_t 
_srcstride, \
 +                                   int width, int height, int16_t* src2, 
ptrdiff_t src2stride);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_pixels_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_v1_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_v2_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_v3_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1v1_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1v2_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h1v3_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2v1_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2v2_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h2v3_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v1_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v2_neon_8);
 +QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v3_neon_8);
 +#undef QPEL_FUNC_UW
 +
 +void ff_hevc_put_qpel_neon_wrapper(int16_t *dst, uint8_t *src, ptrdiff_t 
srcstride,
 +                                   int height, intptr_t mx, intptr_t my, int 
width) {
 +
 +    put_hevc_qpel_neon[my][mx](dst, MAX_PB_SIZE, src, srcstride, height, 
width);
 +}
 +
 +void ff_hevc_put_qpel_uni_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, 
uint8_t *src, ptrdiff_t srcstride,
 +                                   int height, intptr_t mx, intptr_t my, int 
width) {
 +
 +    put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, 
height, NULL, 0);
 +}
 +
 +void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, 
uint8_t *src, ptrdiff_t srcstride,
 +                                       int16_t *src2,
 +                                       int height, intptr_t mx, intptr_t my, 
int width) {
 +    put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, 
height, src2, MAX_PB_SIZE);
 +}
 +
- av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
++av_cold void ff_hevc_dsp_init_neon(HEVCDSPContext *c, const int bit_depth)
 +{
 +    if (bit_depth == 8) {
 +        int x;
 +        c->hevc_v_loop_filter_luma     = ff_hevc_v_loop_filter_luma_neon;
 +        c->hevc_h_loop_filter_luma     = ff_hevc_h_loop_filter_luma_neon;
 +        c->hevc_v_loop_filter_chroma   = ff_hevc_v_loop_filter_chroma_neon;
 +        c->hevc_h_loop_filter_chroma   = ff_hevc_h_loop_filter_chroma_neon;
-         c->idct[0]                     = ff_hevc_transform_4x4_neon_8;
-         c->idct[1]                     = ff_hevc_transform_8x8_neon_8;
++        c->idct[0]                     = ff_hevc_idct_4x4_8_neon;
++        c->idct[1]                     = ff_hevc_idct_8x8_8_neon;
 +        c->idct_dc[0]                  = ff_hevc_idct_4x4_dc_neon_8;
 +        c->idct_dc[1]                  = ff_hevc_idct_8x8_dc_neon_8;
 +        c->idct_dc[2]                  = ff_hevc_idct_16x16_dc_neon_8;
 +        c->idct_dc[3]                  = ff_hevc_idct_32x32_dc_neon_8;
 +        c->add_residual[0]             = ff_hevc_add_residual_4x4_neon_8;
 +        c->add_residual[1]             = ff_hevc_add_residual_8x8_neon_8;
 +        c->add_residual[2]             = ff_hevc_add_residual_16x16_neon_8;
 +        c->add_residual[3]             = ff_hevc_add_residual_32x32_neon_8;
 +        c->transform_4x4_luma          = ff_hevc_transform_luma_4x4_neon_8;
 +        put_hevc_qpel_neon[1][0]       = ff_hevc_put_qpel_v1_neon_8;
 +        put_hevc_qpel_neon[2][0]       = ff_hevc_put_qpel_v2_neon_8;
 +        put_hevc_qpel_neon[3][0]       = ff_hevc_put_qpel_v3_neon_8;
 +        put_hevc_qpel_neon[0][1]       = ff_hevc_put_qpel_h1_neon_8;
 +        put_hevc_qpel_neon[0][2]       = ff_hevc_put_qpel_h2_neon_8;
 +        put_hevc_qpel_neon[0][3]       = ff_hevc_put_qpel_h3_neon_8;
 +        put_hevc_qpel_neon[1][1]       = ff_hevc_put_qpel_h1v1_neon_8;
 +        put_hevc_qpel_neon[1][2]       = ff_hevc_put_qpel_h2v1_neon_8;
 +        put_hevc_qpel_neon[1][3]       = ff_hevc_put_qpel_h3v1_neon_8;
 +        put_hevc_qpel_neon[2][1]       = ff_hevc_put_qpel_h1v2_neon_8;
 +        put_hevc_qpel_neon[2][2]       = ff_hevc_put_qpel_h2v2_neon_8;
 +        put_hevc_qpel_neon[2][3]       = ff_hevc_put_qpel_h3v2_neon_8;
 +        put_hevc_qpel_neon[3][1]       = ff_hevc_put_qpel_h1v3_neon_8;
 +        put_hevc_qpel_neon[3][2]       = ff_hevc_put_qpel_h2v3_neon_8;
 +        put_hevc_qpel_neon[3][3]       = ff_hevc_put_qpel_h3v3_neon_8;
 +        put_hevc_qpel_uw_neon[1][0]      = ff_hevc_put_qpel_uw_v1_neon_8;
 +        put_hevc_qpel_uw_neon[2][0]      = ff_hevc_put_qpel_uw_v2_neon_8;
 +        put_hevc_qpel_uw_neon[3][0]      = ff_hevc_put_qpel_uw_v3_neon_8;
 +        put_hevc_qpel_uw_neon[0][1]      = ff_hevc_put_qpel_uw_h1_neon_8;
 +        put_hevc_qpel_uw_neon[0][2]      = ff_hevc_put_qpel_uw_h2_neon_8;
 +        put_hevc_qpel_uw_neon[0][3]      = ff_hevc_put_qpel_uw_h3_neon_8;
 +        put_hevc_qpel_uw_neon[1][1]      = ff_hevc_put_qpel_uw_h1v1_neon_8;
 +        put_hevc_qpel_uw_neon[1][2]      = ff_hevc_put_qpel_uw_h2v1_neon_8;
 +        put_hevc_qpel_uw_neon[1][3]      = ff_hevc_put_qpel_uw_h3v1_neon_8;
 +        put_hevc_qpel_uw_neon[2][1]      = ff_hevc_put_qpel_uw_h1v2_neon_8;
 +        put_hevc_qpel_uw_neon[2][2]      = ff_hevc_put_qpel_uw_h2v2_neon_8;
 +        put_hevc_qpel_uw_neon[2][3]      = ff_hevc_put_qpel_uw_h3v2_neon_8;
 +        put_hevc_qpel_uw_neon[3][1]      = ff_hevc_put_qpel_uw_h1v3_neon_8;
 +        put_hevc_qpel_uw_neon[3][2]      = ff_hevc_put_qpel_uw_h2v3_neon_8;
 +        put_hevc_qpel_uw_neon[3][3]      = ff_hevc_put_qpel_uw_h3v3_neon_8;
 +        for (x = 0; x < 10; x++) {
 +            c->put_hevc_qpel[x][1][0]         = ff_hevc_put_qpel_neon_wrapper;
 +            c->put_hevc_qpel[x][0][1]         = ff_hevc_put_qpel_neon_wrapper;
 +            c->put_hevc_qpel[x][1][1]         = ff_hevc_put_qpel_neon_wrapper;
 +            c->put_hevc_qpel_uni[x][1][0]     = 
ff_hevc_put_qpel_uni_neon_wrapper;
 +            c->put_hevc_qpel_uni[x][0][1]     = 
ff_hevc_put_qpel_uni_neon_wrapper;
 +            c->put_hevc_qpel_uni[x][1][1]     = 
ff_hevc_put_qpel_uni_neon_wrapper;
 +            c->put_hevc_qpel_bi[x][1][0]      = 
ff_hevc_put_qpel_bi_neon_wrapper;
 +            c->put_hevc_qpel_bi[x][0][1]      = 
ff_hevc_put_qpel_bi_neon_wrapper;
 +            c->put_hevc_qpel_bi[x][1][1]      = 
ff_hevc_put_qpel_bi_neon_wrapper;
 +        }
 +        c->put_hevc_qpel[0][0][0]  = ff_hevc_put_pixels_w2_neon_8;
 +        c->put_hevc_qpel[1][0][0]  = ff_hevc_put_pixels_w4_neon_8;
 +        c->put_hevc_qpel[2][0][0]  = ff_hevc_put_pixels_w6_neon_8;
 +        c->put_hevc_qpel[3][0][0]  = ff_hevc_put_pixels_w8_neon_8;
 +        c->put_hevc_qpel[4][0][0]  = ff_hevc_put_pixels_w12_neon_8;
 +        c->put_hevc_qpel[5][0][0]  = ff_hevc_put_pixels_w16_neon_8;
 +        c->put_hevc_qpel[6][0][0]  = ff_hevc_put_pixels_w24_neon_8;
 +        c->put_hevc_qpel[7][0][0]  = ff_hevc_put_pixels_w32_neon_8;
 +        c->put_hevc_qpel[8][0][0]  = ff_hevc_put_pixels_w48_neon_8;
 +        c->put_hevc_qpel[9][0][0]  = ff_hevc_put_pixels_w64_neon_8;
 +
 +        c->put_hevc_qpel_uni[1][0][0]  = ff_hevc_put_qpel_uw_pixels_w4_neon_8;
 +        c->put_hevc_qpel_uni[3][0][0]  = ff_hevc_put_qpel_uw_pixels_w8_neon_8;
 +        c->put_hevc_qpel_uni[5][0][0]  = 
ff_hevc_put_qpel_uw_pixels_w16_neon_8;
 +        c->put_hevc_qpel_uni[6][0][0]  = 
ff_hevc_put_qpel_uw_pixels_w24_neon_8;
 +        c->put_hevc_qpel_uni[7][0][0]  = 
ff_hevc_put_qpel_uw_pixels_w32_neon_8;
 +        c->put_hevc_qpel_uni[8][0][0]  = 
ff_hevc_put_qpel_uw_pixels_w48_neon_8;
 +        c->put_hevc_qpel_uni[9][0][0]  = 
ff_hevc_put_qpel_uw_pixels_w64_neon_8;
 +    }
++
++    if (bit_depth == 10) {
++        c->idct[0] = ff_hevc_idct_4x4_10_neon;
++        c->idct[1] = ff_hevc_idct_8x8_10_neon;
++    }
 +}
diff --cc libavcodec/hevcdsp.c
index e432aa3cf9,81db9e29a5..957e40d5ff
--- a/libavcodec/hevcdsp.c
+++ b/libavcodec/hevcdsp.c
@@@ -261,8 -251,4 +263,6 @@@ int i = 0
          ff_hevc_dsp_init_ppc(hevcdsp, bit_depth);
      if (ARCH_X86)
          ff_hevc_dsp_init_x86(hevcdsp, bit_depth);
-     if (ARCH_ARM)
-         ff_hevcdsp_init_arm(hevcdsp, bit_depth);
 +    if (ARCH_MIPS)
 +        ff_hevc_dsp_init_mips(hevcdsp, bit_depth);
  }
diff --cc libavcodec/hevcdsp.h
index dc48ebca11,7fc6f9cf8a..0ae67cba85
--- a/libavcodec/hevcdsp.h
+++ b/libavcodec/hevcdsp.h
@@@ -124,11 -115,13 +124,12 @@@ typedef struct HEVCDSPContext 
  
  void ff_hevc_dsp_init(HEVCDSPContext *hpc, int bit_depth);
  
 +extern const int8_t ff_hevc_epel_filters[7][4];
 +extern const int8_t ff_hevc_qpel_filters[3][16];
 +
+ void ff_hevc_dsp_init_arm(HEVCDSPContext *c, const int bit_depth);
  void ff_hevc_dsp_init_ppc(HEVCDSPContext *c, const int bit_depth);
  void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth);
- void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth);
 -
 -extern const int16_t ff_hevc_epel_coeffs[7][16];
 -extern const int8_t ff_hevc_epel_coeffs8[7][16];
 -extern const int16_t ff_hevc_qpel_coeffs[3][8];
 -extern const int8_t ff_hevc_qpel_coeffs8[3][16];
 +void ff_hevc_dsp_init_mips(HEVCDSPContext *c, const int bit_depth);
+ 
  #endif /* AVCODEC_HEVCDSP_H */

_______________________________________________
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog

Reply via email to