Hi, As the title says, this patch moves some arm_neon.h functions which currently use inline assembly over to intrinsics.
Bootstrapped and tested on aarch64-none-linux-gnu. OK? If so, can someone please apply it on my behalf? Thanks, James --- gcc/ 2020-02-18 James Greenhalgh <james.greenha...@arm.com> * config/aarch64/aarch64-simd-builtins.def (intrinsic_vec_smult_lo_): New. (intrinsic_vec_umult_lo_): Likewise. (vec_widen_smult_hi_): Likewise. (vec_widen_umult_hi_): Likewise. * config/aarch64/aarch64-simd.md (aarch64_intrinsic_vec_<su>mult_lo_<mode>): New. * config/aarch64/arm_neon.h (vmull_high_s8): Use intrinsics. (vmull_high_s16): Likewise. (vmull_high_s32): Likewise. (vmull_high_u8): Likewise. (vmull_high_u16): Likewise. (vmull_high_u32): Likewise. (vmull_s8): Likewise. (vmull_s16): Likewise. (vmull_s32): Likewise. (vmull_u8): Likewise. (vmull_u16): Likewise. (vmull_u32): Likewise. gcc/testsuite/ 2020-02-18 James Greenhalgh <james.greenha...@arm.com> * gcc.target/aarch64/vmull_high.c: New.
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def index 57fc5933b43..f86866b9e78 100644 --- a/gcc/config/aarch64/aarch64-simd-builtins.def +++ b/gcc/config/aarch64/aarch64-simd-builtins.def @@ -185,6 +185,12 @@ BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n, 0) BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n, 0) + BUILTIN_VD_BHSI (BINOP, intrinsic_vec_smult_lo_, 0) + BUILTIN_VD_BHSI (BINOPU, intrinsic_vec_umult_lo_, 0) + + BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10) + BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10) + BUILTIN_VSD_HSI (BINOP, sqdmull, 0) BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0) BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0) diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index 4e28cf97516..281b9ce93b9 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -1791,6 +1791,17 @@ [(set_attr "type" "neon_mul_<Vetype>_long")] ) +(define_insn "aarch64_intrinsic_vec_<su>mult_lo_<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (mult:<VWIDE> (ANY_EXTEND:<VWIDE> + (match_operand:VD_BHSI 1 "register_operand" "w")) + (ANY_EXTEND:<VWIDE> + (match_operand:VD_BHSI 2 "register_operand" "w"))))] + "TARGET_SIMD" + "<su>mull\\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_mul_<Vetype>_long")] +) + (define_expand "vec_widen_<su>mult_lo_<mode>" [(match_operand:<VWIDE> 0 "register_operand") (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand")) diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index c7425346b86..0b11d670837 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -9218,72 +9218,42 @@ __extension__ extern __inline int16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_s8 (int8x16_t __a, int8x16_t __b) { - int16x8_t __result; - __asm__ ("smull2 %0.8h,%1.16b,%2.16b" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers 
*/); - return __result; + return __builtin_aarch64_vec_widen_smult_hi_v16qi (__a, __b); } __extension__ extern __inline int32x4_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_s16 (int16x8_t __a, int16x8_t __b) { - int32x4_t __result; - __asm__ ("smull2 %0.4s,%1.8h,%2.8h" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_vec_widen_smult_hi_v8hi (__a, __b); } __extension__ extern __inline int64x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_s32 (int32x4_t __a, int32x4_t __b) { - int64x2_t __result; - __asm__ ("smull2 %0.2d,%1.4s,%2.4s" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_vec_widen_smult_hi_v4si (__a, __b); } __extension__ extern __inline uint16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_u8 (uint8x16_t __a, uint8x16_t __b) { - uint16x8_t __result; - __asm__ ("umull2 %0.8h,%1.16b,%2.16b" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_vec_widen_umult_hi_v16qi_uuu (__a, __b); } __extension__ extern __inline uint32x4_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_u16 (uint16x8_t __a, uint16x8_t __b) { - uint32x4_t __result; - __asm__ ("umull2 %0.4s,%1.8h,%2.8h" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_vec_widen_umult_hi_v8hi_uuu (__a, __b); } __extension__ extern __inline uint64x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_u32 (uint32x4_t __a, uint32x4_t __b) { - uint64x2_t __result; - __asm__ ("umull2 %0.2d,%1.4s,%2.4s" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_vec_widen_umult_hi_v4si_uuu (__a, __b); } #define vmull_lane_s16(a, b, c) \ @@ -9454,72 
+9424,42 @@ __extension__ extern __inline int16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_s8 (int8x8_t __a, int8x8_t __b) { - int16x8_t __result; - __asm__ ("smull %0.8h, %1.8b, %2.8b" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_intrinsic_vec_smult_lo_v8qi (__a, __b); } __extension__ extern __inline int32x4_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_s16 (int16x4_t __a, int16x4_t __b) { - int32x4_t __result; - __asm__ ("smull %0.4s, %1.4h, %2.4h" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_intrinsic_vec_smult_lo_v4hi (__a, __b); } __extension__ extern __inline int64x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_s32 (int32x2_t __a, int32x2_t __b) { - int64x2_t __result; - __asm__ ("smull %0.2d, %1.2s, %2.2s" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_intrinsic_vec_smult_lo_v2si (__a, __b); } __extension__ extern __inline uint16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_u8 (uint8x8_t __a, uint8x8_t __b) { - uint16x8_t __result; - __asm__ ("umull %0.8h, %1.8b, %2.8b" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_intrinsic_vec_umult_lo_v8qi_uuu (__a, __b); } __extension__ extern __inline uint32x4_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_u16 (uint16x4_t __a, uint16x4_t __b) { - uint32x4_t __result; - __asm__ ("umull %0.4s, %1.4h, %2.4h" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_intrinsic_vec_umult_lo_v4hi_uuu (__a, __b); } __extension__ extern __inline uint64x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_u32 (uint32x2_t __a, uint32x2_t 
__b) { - uint64x2_t __result; - __asm__ ("umull %0.2d, %1.2s, %2.2s" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return __builtin_aarch64_intrinsic_vec_umult_lo_v2si_uuu (__a, __b); } __extension__ extern __inline int16x4_t diff --git a/gcc/testsuite/gcc.target/aarch64/vmull_high.c b/gcc/testsuite/gcc.target/aarch64/vmull_high.c new file mode 100644 index 00000000000..cddb7e7a96a --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/vmull_high.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-additional-options "-O3" } */ + +#include <arm_neon.h> + +int64x2_t +doit (int8x16_t a) +{ + int16x8_t b = vmull_high_s8 (a, a); + int32x4_t c = vmull_high_s16 (b, b); + return vmull_high_s32 (c, c); +} + +uint64x2_t +douit (uint8x16_t a) +{ + uint16x8_t b = vmull_high_u8 (a, a); + uint32x4_t c = vmull_high_u16 (b, b); + return vmull_high_u32 (c, c); +} + +/* { dg-final { scan-assembler-times "smull2\[ |\t\]*v" 3} } */ +/* { dg-final { scan-assembler-times "umull2\[ |\t\]*v" 3} } */