Hi,
As per the subject, this patch rewrites the [su]mlsl_high_n Neon intrinsics to
use RTL builtins rather than inline assembly code, allowing for better
scheduling and optimization.
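For context, a minimal use of one of the affected intrinsics looks like this
(the function and variable names below are purely illustrative and not part
of the patch):

  #include <arm_neon.h>

  int32x4_t
  mlsl_high_example (int32x4_t acc, int16x8_t v, int16_t s)
  {
    /* For i = 0..3: acc[i] -= (int32_t) v[i + 4] * s.  With the RTL
       builtin the compiler can schedule and combine this widening
       multiply-subtract like any other insn instead of treating it as
       an opaque asm block.  */
    return vmlsl_high_n_s16 (acc, v, s);
  }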
Regression tested and bootstrapped on aarch64-none-linux-gnu and
aarch64_be-none-elf - no issues.
Ok for master?
Thanks,
Jonathan
---
gcc/ChangeLog:
2021-01-27 Jonathan Wright
* config/aarch64/aarch64-simd-builtins.def: Add [su]mlsl_hi_n
builtin generator macros.
* config/aarch64/aarch64-simd.md (aarch64_<su>mlsl_hi_n<mode>_insn):
Define.
(aarch64_<su>mlsl_hi_n<mode>): Define.
* config/aarch64/arm_neon.h (vmlsl_high_n_s16): Use RTL builtin
instead of inline asm.
(vmlsl_high_n_s32): Likewise.
(vmlsl_high_n_u16): Likewise.
(vmlsl_high_n_u32): Likewise.
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index c102289c26123ae913df87d327237647d2621655..336f9f9a56b07668678e5b384a89f518433da58b 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -230,6 +230,10 @@
BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
+  /* Implemented by aarch64_<su>mlsl_hi_n<mode>.  */
+ BUILTIN_VQ_HSI (TERNOP, smlsl_hi_n, 0, NONE)
+ BUILTIN_VQ_HSI (TERNOPU, umlsl_hi_n, 0, NONE)
+
 /* Implemented by aarch64_<su>mlal_hi<mode>.  */
BUILTIN_VQW (TERNOP, smlal_hi, 0, NONE)
BUILTIN_VQW (TERNOPU, umlal_hi, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index a883f6ad4de8bb6d0c5f6478df5c516c159df4bb..1e9b4d933f3f9385d857b497e573de6aee25c57f 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1974,6 +1974,35 @@
}
)
+(define_insn "aarch64_<su>mlsl_hi_n<mode>_insn"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+	(minus:<VWIDE>
+	  (match_operand:<VWIDE> 1 "register_operand" "0")
+	  (mult:<VWIDE>
+	    (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+	      (match_operand:VQ_HSI 2 "register_operand" "w")
+	      (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+	    (ANY_EXTEND:<VWIDE> (vec_duplicate:<VHALF>
+	      (match_operand:<VEL> 4 "register_operand" "<h_con>"))))))]
+  "TARGET_SIMD"
+  "<su>mlsl2\t%0.<Vwtype>, %2.<Vtype>, %4.<Vetype>[0]"
+  [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_expand "aarch64_<su>mlsl_hi_n<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (match_operand:<VWIDE> 1 "register_operand")
+   (ANY_EXTEND:<VWIDE>(match_operand:VQ_HSI 2 "register_operand"))
+   (match_operand:<VEL> 3 "register_operand")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+  emit_insn (gen_aarch64_<su>mlsl_hi_n<mode>_insn (operands[0],
+	     operands[1], operands[2], p, operands[3]));
+  DONE;
+}
+)
+
(define_insn "aarch64_mlal"
[(set (match_operand: 0 "register_operand" "=w")
(plus:
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ae8526d5972067c05265a1f0bcf9fde5e347fb3b..7e2c2fc3827e773b960abc137b2cadea61a54577 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7792,48 +7792,28 @@ __extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
{
- int32x4_t __result;
- __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "x"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_smlsl_hi_nv8hi (__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
{
- int64x2_t __result;
- __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[0]"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_smlsl_hi_nv4si (__a, __b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_u16 (uint32x4_t __a, uint16x8_t __b, uint16_t __c)
{
- uint32x4_t __result;
- __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "x"(__c)
- : /* No clobbers */);
- return __result;
+  return __builtin_aarch64_umlsl_hi_nv8hi_uuuu (__a, __b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_u32 (uint64x2_t __a, uint32x4_t __b, uint32_t __c)
{
- uint64x2_t __result;
- __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+  return __builtin_aarch64_umlsl_hi_nv4si_uuuu (__a, __b, __c);
}
__extension__ extern __inline int16x8_t