Hi,

As per the subject, this patch rewrites the [su]mlal_high Neon intrinsics
to use RTL builtins rather than inline assembly code, allowing for better
scheduling and optimization.
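For reviewers less familiar with these intrinsics, here is an illustrative
sketch (not part of the patch) of the semantics: vmlal_high_* widens the
high half of each narrow source vector, multiplies element-wise and
accumulates into the wide accumulator, so for vmlal_high_s16 the result is
out[i] = __a[i] + (int32_t) __b[4 + i] * (int32_t) __c[4 + i]:

#include <arm_neon.h>
#include <stdio.h>

/* Scalar reference for vmlal_high_s16: the high half of each 16-bit
   source vector is widened to 32 bits, multiplied element-wise and
   accumulated into the 32-bit accumulator.  */
int
main (void)
{
  int16_t b[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int16_t c[8] = {8, 7, 6, 5, 4, 3, 2, 1};
  int32_t a[4] = {100, 200, 300, 400};
  int32_t out[4];

  int32x4_t res = vmlal_high_s16 (vld1q_s32 (a),
				  vld1q_s16 (b), vld1q_s16 (c));
  vst1q_s32 (out, res);

  for (int i = 0; i < 4; i++)
    printf ("%d\n", out[i]);	/* 120, 218, 314, 408 */
  return 0;
}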
Regression tested and bootstrapped on aarch64-none-linux-gnu - no issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-01-27  Jonathan Wright  <jonathan.wri...@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add RTL builtin
	generator macros.
	* config/aarch64/aarch64-simd.md (*aarch64_<su>mlal_hi<mode>):
	Rename to...
	(aarch64_<su>mlal_hi<mode>_insn): This.
	(aarch64_<su>mlal_hi<mode>): Define.
	* config/aarch64/arm_neon.h (vmlal_high_s8): Use RTL builtin
	instead of inline asm.
	(vmlal_high_s16): Likewise.
	(vmlal_high_s32): Likewise.
	(vmlal_high_u8): Likewise.
	(vmlal_high_u16): Likewise.
	(vmlal_high_u32): Likewise.
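As an illustration of the intended effect (again not part of the patch,
and the function name below is hypothetical): because the intrinsic now
expands through a builtin to a real RTL pattern instead of an opaque asm
block, the optimizers can see and schedule the multiply-accumulate. A
compiler with this patch applied should still emit a single smlal2 for:

#include <arm_neon.h>

/* Hypothetical test function: expected codegen is a single
   "smlal2 v0.4s, v1.8h, v2.8h", now produced via the
   __builtin_aarch64_smlal_hiv8hi builtin rather than inline asm.  */
int32x4_t
mla_high (int32x4_t __acc, int16x8_t __b, int16x8_t __c)
{
  return vmlal_high_s16 (__acc, __b, __c);
}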
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index b82b6431d6f2a8d7d21023da589f3eecec7f0d65..2d91a0768d66fb8570ce518c06faae28c0ffcf27 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -230,6 +230,10 @@
   BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
   BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
 
+  /* Implemented by aarch64_<su>mlal_hi<mode>.  */
+  BUILTIN_VQW (TERNOP, smlal_hi, 0, NONE)
+  BUILTIN_VQW (TERNOPU, umlal_hi, 0, NONE)
+
   BUILTIN_VSQN_HSDI (UNOPUS, sqmovun, 0, NONE)
   /* Implemented by aarch64_<sur>qmovn<mode>.  */
   BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index d1858663a4e78c0861d902b37e93c0b00d75e661..ff5037fb44ebb4d1d37ab838de6391e105e90bbf 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1869,7 +1869,7 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
-(define_insn "*aarch64_<su>mlal_hi<mode>"
+(define_insn "aarch64_<su>mlal_hi<mode>_insn"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(plus:<VWIDE>
 	  (mult:<VWIDE>
@@ -1885,6 +1885,20 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
+(define_expand "aarch64_<su>mlal_hi<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (match_operand:<VWIDE> 1 "register_operand")
+   (ANY_EXTEND:<VWIDE>(match_operand:VQW 2 "register_operand"))
+   (match_operand:VQW 3 "register_operand")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+  emit_insn (gen_aarch64_<su>mlal_hi<mode>_insn (operands[0], operands[1],
+						 operands[2], p, operands[3]));
+  DONE;
+}
+)
+
 (define_insn "*aarch64_<su>mlsl_lo<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(minus:<VWIDE>
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ad0dfef80f39c1baf1e8c7c1bb95f325eff6ac7a..53aae934c37eadc179bb1d4e7fe033d06364628a 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7346,72 +7346,42 @@
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
 {
-  int16x8_t __result;
-  __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_hiv16qi (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_hiv8hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
 {
-  int64x2_t __result;
-  __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_hiv4si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
 {
-  uint16x8_t __result;
-  __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_hiv16qi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_hiv8hi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
 {
-  uint64x2_t __result;
-  __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_hiv4si_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x4_t