Hi,

As per the subject, this patch rewrites the [su]mlal_n Neon intrinsics to use RTL builtins rather than inline assembly code, allowing for better instruction scheduling and optimization.
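For reference, vmlal_n_s16 performs a widening multiply-accumulate of a
vector by a scalar. A minimal sketch of the semantics the new builtins
must preserve (the scaled_accumulate wrapper is hypothetical, for
illustration only):

  #include <arm_neon.h>

  /* Per lane i: result[i] = acc[i] + (int32_t) vals[i] * (int32_t) scale.
     An RTL builtin exposes this multiply-accumulate to the mid-end for
     scheduling and combining; an inline asm block is opaque to it.  */
  int32x4_t
  scaled_accumulate (int32x4_t acc, int16x4_t vals, int16_t scale)
  {
    return vmlal_n_s16 (acc, vals, scale);
  }

This should still compile to a single 'smlal v0.4s, v1.4h, v2.h[0]'
after the rewrite.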
Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-01-26  Jonathan Wright  <jonathan.wri...@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]mlal_n
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>mlal_n<mode>):
	Define.
	* config/aarch64/arm_neon.h (vmlal_n_s16): Use RTL builtin
	instead of inline asm.
	(vmlal_n_s32): Likewise.
	(vmlal_n_u16): Likewise.
	(vmlal_n_u32): Likewise.
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index a71ae4d724136c8b626d397bf6187e8b595a2b8a..4f8e28dc3c8478ea50aad333b21bd83f4a4b750e 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -196,6 +196,10 @@
   BUILTIN_VD_BHSI (TERNOP, smlal, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlal, 0, NONE)
 
+  /* Implemented by aarch64_<su>mlal_n<mode>.  */
+  BUILTIN_VD_HSI (TERNOP, smlal_n, 0, NONE)
+  BUILTIN_VD_HSI (TERNOPU, umlal_n, 0, NONE)
+
   /* Implemented by aarch64_<su>mlsl_hi<mode>.  */
   BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
   BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index db56b61baf2093c88d8757b25580b3032f00a355..d78f26be19a16163eb1b8f661c6100ac290e6c6b 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1839,6 +1839,21 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
+(define_insn "aarch64_<su>mlal_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (plus:<VWIDE>
+          (mult:<VWIDE>
+            (ANY_EXTEND:<VWIDE>
+              (vec_duplicate:VD_HSI
+                (match_operand:<VEL> 3 "register_operand" "<h_con>")))
+            (ANY_EXTEND:<VWIDE>
+              (match_operand:VD_HSI 2 "register_operand" "w")))
+          (match_operand:<VWIDE> 1 "register_operand" "0")))]
+  "TARGET_SIMD"
+  "<su>mlal\t%0.<Vwtype>, %2.<Vtype>, %3.<Vetype>[0]"
+  [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
 (define_insn "aarch64_<su>mlsl<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
         (minus:<VWIDE>
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 674ccc63b69ca1945dc684d2b06c1e31f52bfdb3..004c73d9e0ec4c33e24968d17e4307f858b51263 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7608,48 +7608,28 @@ __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_nv4hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
 {
-  int64x2_t __result;
-  __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_nv2si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_nv4hi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
 {
-  uint64x2_t __result;
-  __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_nv2si_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t