Implement vaddlvq using the new MVE builtins framework. Since we kept v4si hardcoded in the builtin name, we need to special-case it in unspec_mve_function_exact_insn_pred_p.
2022-10-25 Christophe Lyon <christophe.lyon@arm.com> gcc/ * config/arm/arm-mve-builtins-base.cc (vaddlvq): New. * config/arm/arm-mve-builtins-base.def (vaddlvq): New. * config/arm/arm-mve-builtins-base.h (vaddlvq): New. * config/arm/arm-mve-builtins-functions.h (unspec_mve_function_exact_insn_pred_p): Handle vaddlvq. * config/arm/arm_mve.h (vaddlvq): Remove. (vaddlvq_p): Remove. (vaddlvq_s32): Remove. (vaddlvq_u32): Remove. (vaddlvq_p_s32): Remove. (vaddlvq_p_u32): Remove. (__arm_vaddlvq_s32): Remove. (__arm_vaddlvq_u32): Remove. (__arm_vaddlvq_p_s32): Remove. (__arm_vaddlvq_p_u32): Remove. (__arm_vaddlvq): Remove. (__arm_vaddlvq_p): Remove. --- gcc/config/arm/arm-mve-builtins-base.cc | 1 + gcc/config/arm/arm-mve-builtins-base.def | 1 + gcc/config/arm/arm-mve-builtins-base.h | 1 + gcc/config/arm/arm-mve-builtins-functions.h | 69 ++++++++++++++------ gcc/config/arm/arm_mve.h | 72 --------------------- 5 files changed, 51 insertions(+), 93 deletions(-) diff --git a/gcc/config/arm/arm-mve-builtins-base.cc b/gcc/config/arm/arm-mve-builtins-base.cc index e87069b0467..fdc0ff50b96 100644 --- a/gcc/config/arm/arm-mve-builtins-base.cc +++ b/gcc/config/arm/arm-mve-builtins-base.cc @@ -244,6 +244,7 @@ namespace arm_mve { FUNCTION_WITHOUT_N (vabdq, VABDQ) FUNCTION (vabsq, unspec_based_mve_function_exact_insn, (ABS, ABS, ABS, -1, -1, -1, VABSQ_M_S, -1, VABSQ_M_F, -1, -1, -1)) FUNCTION_WITH_RTX_M_N (vaddq, PLUS, VADDQ) +FUNCTION_PRED_P_S_U (vaddlvq, VADDLVQ) FUNCTION_PRED_P_S_U (vaddvq, VADDVQ) FUNCTION_PRED_P_S_U (vaddvaq, VADDVAQ) FUNCTION_WITH_RTX_M (vandq, AND, VANDQ) diff --git a/gcc/config/arm/arm-mve-builtins-base.def b/gcc/config/arm/arm-mve-builtins-base.def index 413fe4a1ef0..dcfb426a7fb 100644 --- a/gcc/config/arm/arm-mve-builtins-base.def +++ b/gcc/config/arm/arm-mve-builtins-base.def @@ -20,6 +20,7 @@ #define REQUIRES_FLOAT false DEF_MVE_FUNCTION (vabdq, binary, all_integer, mx_or_none) DEF_MVE_FUNCTION (vabsq, unary, all_signed, mx_or_none) +DEF_MVE_FUNCTION 
(vaddlvq, unary_acc, integer_32, p_or_none) DEF_MVE_FUNCTION (vaddq, binary_opt_n, all_integer, mx_or_none) DEF_MVE_FUNCTION (vaddvaq, unary_int32_acc, all_integer, p_or_none) DEF_MVE_FUNCTION (vaddvq, unary_int32, all_integer, p_or_none) diff --git a/gcc/config/arm/arm-mve-builtins-base.h b/gcc/config/arm/arm-mve-builtins-base.h index 5338b777444..5de70d5e1d4 100644 --- a/gcc/config/arm/arm-mve-builtins-base.h +++ b/gcc/config/arm/arm-mve-builtins-base.h @@ -25,6 +25,7 @@ namespace functions { extern const function_base *const vabdq; extern const function_base *const vabsq; +extern const function_base *const vaddlvq; extern const function_base *const vaddq; extern const function_base *const vaddvaq; extern const function_base *const vaddvq; diff --git a/gcc/config/arm/arm-mve-builtins-functions.h b/gcc/config/arm/arm-mve-builtins-functions.h index d069990dcab..ea926e42b81 100644 --- a/gcc/config/arm/arm-mve-builtins-functions.h +++ b/gcc/config/arm/arm-mve-builtins-functions.h @@ -408,32 +408,59 @@ public: expand (function_expander &e) const override { insn_code code; - switch (e.pred) + + if ((m_unspec_for_sint == VADDLVQ_S) + || m_unspec_for_sint == VADDLVAQ_S) { - case PRED_none: - if (e.type_suffix (0).integer_p) - if (e.type_suffix (0).unsigned_p) - code = code_for_mve_q (m_unspec_for_uint, m_unspec_for_uint, e.vector_mode (0)); - else - code = code_for_mve_q (m_unspec_for_sint, m_unspec_for_sint, e.vector_mode (0)); - else - code = code_for_mve_q_f (m_unspec_for_fp, e.vector_mode (0)); + switch (e.pred) + { + case PRED_none: + if (e.type_suffix (0).unsigned_p) + code = code_for_mve_q_v4si (m_unspec_for_uint, m_unspec_for_uint); + else + code = code_for_mve_q_v4si (m_unspec_for_sint, m_unspec_for_sint); + return e.use_exact_insn (code); - return e.use_exact_insn (code); + case PRED_p: + if (e.type_suffix (0).unsigned_p) + code = code_for_mve_q_p_v4si (m_unspec_for_p_uint, m_unspec_for_p_uint); + else + code = code_for_mve_q_p_v4si (m_unspec_for_p_sint, 
m_unspec_for_p_sint); + return e.use_exact_insn (code); - case PRED_p: - if (e.type_suffix (0).integer_p) - if (e.type_suffix (0).unsigned_p) - code = code_for_mve_q_p (m_unspec_for_p_uint, m_unspec_for_p_uint, e.vector_mode (0)); - else - code = code_for_mve_q_p (m_unspec_for_p_sint, m_unspec_for_p_sint, e.vector_mode (0)); - else - code = code_for_mve_q_p_f (m_unspec_for_p_fp, e.vector_mode (0)); + default: + gcc_unreachable (); + } + } + else + { + switch (e.pred) + { + case PRED_none: + if (e.type_suffix (0).integer_p) + if (e.type_suffix (0).unsigned_p) + code = code_for_mve_q (m_unspec_for_uint, m_unspec_for_uint, e.vector_mode (0)); + else + code = code_for_mve_q (m_unspec_for_sint, m_unspec_for_sint, e.vector_mode (0)); + else + code = code_for_mve_q_f (m_unspec_for_fp, e.vector_mode (0)); - return e.use_exact_insn (code); + return e.use_exact_insn (code); - default: - gcc_unreachable (); + case PRED_p: + if (e.type_suffix (0).integer_p) + if (e.type_suffix (0).unsigned_p) + code = code_for_mve_q_p (m_unspec_for_p_uint, m_unspec_for_p_uint, e.vector_mode (0)); + else + code = code_for_mve_q_p (m_unspec_for_p_sint, m_unspec_for_p_sint, e.vector_mode (0)); + else + code = code_for_mve_q_p_f (m_unspec_for_p_fp, e.vector_mode (0)); + + return e.use_exact_insn (code); + + default: + gcc_unreachable (); + } } gcc_unreachable (); diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h index 74783570561..21d7768a732 100644 --- a/gcc/config/arm/arm_mve.h +++ b/gcc/config/arm/arm_mve.h @@ -42,11 +42,9 @@ #ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE #define vst4q(__addr, __value) __arm_vst4q(__addr, __value) -#define vaddlvq(__a) __arm_vaddlvq(__a) #define vmovlbq(__a) __arm_vmovlbq(__a) #define vmovltq(__a) __arm_vmovltq(__a) #define vmvnq(__a) __arm_vmvnq(__a) -#define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p) #define vornq(__a, __b) __arm_vornq(__a, __b) #define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b) #define vmullbq_int(__a, __b) 
__arm_vmullbq_int(__a, __b) @@ -324,7 +322,6 @@ #define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a) #define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a) #define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a) -#define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a) #define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a) #define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a) #define vmovltq_s8(__a) __arm_vmovltq_s8(__a) @@ -353,7 +350,6 @@ #define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a) #define vmvnq_n_u16( __imm) __arm_vmvnq_n_u16( __imm) #define vmvnq_n_u32( __imm) __arm_vmvnq_n_u32( __imm) -#define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a) #define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a) #define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a) #define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a) @@ -379,8 +375,6 @@ #define vcvtq_n_s32_f32(__a, __imm6) __arm_vcvtq_n_s32_f32(__a, __imm6) #define vcvtq_n_u16_f16(__a, __imm6) __arm_vcvtq_n_u16_f16(__a, __imm6) #define vcvtq_n_u32_f32(__a, __imm6) __arm_vcvtq_n_u32_f32(__a, __imm6) -#define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p) -#define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p) #define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b) #define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b) #define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b) @@ -1499,13 +1493,6 @@ __arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value) __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o); } -__extension__ extern __inline int64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq_s32 (int32x4_t __a) -{ - return __builtin_mve_vaddlvq_sv4si (__a); -} - __extension__ extern __inline int16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) __arm_vmovlbq_s8 (int8x16_t __a) @@ -1632,13 +1619,6 @@ __arm_vmvnq_n_u32 (const int __imm) return __builtin_mve_vmvnq_n_uv4si (__imm); } -__extension__ extern __inline uint64_t -__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) -__arm_vaddlvq_u32 (uint32x4_t __a) -{ - return __builtin_mve_vaddlvq_uv4si (__a); -} - __extension__ extern __inline mve_pred16_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) __arm_vctp16q (uint32_t __a) @@ -1674,20 +1654,6 @@ __arm_vpnot (mve_pred16_t __a) return __builtin_mve_vpnotv16bi (__a); } -__extension__ extern __inline int64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p) -{ - return __builtin_mve_vaddlvq_p_sv4si (__a, __p); -} - -__extension__ extern __inline uint64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p) -{ - return __builtin_mve_vaddlvq_p_uv4si (__a, __p); -} - __extension__ extern __inline uint8x16_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) __arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b) @@ -9586,13 +9552,6 @@ __arm_vst4q (uint32_t * __addr, uint32x4x4_t __value) __arm_vst4q_u32 (__addr, __value); } -__extension__ extern __inline int64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq (int32x4_t __a) -{ - return __arm_vaddlvq_s32 (__a); -} - __extension__ extern __inline int16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) __arm_vmovlbq (int8x16_t __a) @@ -9691,27 +9650,6 @@ __arm_vmovlbq (uint16x8_t __a) return __arm_vmovlbq_u16 (__a); } -__extension__ extern __inline uint64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq (uint32x4_t __a) -{ - return __arm_vaddlvq_u32 (__a); -} - -__extension__ extern __inline int64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq_p (int32x4_t __a, mve_pred16_t __p) -{ - return __arm_vaddlvq_p_s32 (__a, __p); -} - -__extension__ extern __inline uint64_t -__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) -__arm_vaddlvq_p (uint32x4_t __a, 
mve_pred16_t __p) -{ - return __arm_vaddlvq_p_u32 (__a, __p); -} - __extension__ extern __inline uint8x16_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) __arm_vornq (uint8x16_t __a, uint8x16_t __b) @@ -19212,16 +19150,6 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce3(p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) -#define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \ - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));}) - -#define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \ - _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \ - int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \ - int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));}) - #define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ __typeof(p1) __p1 = (p1); \ __typeof(p2) __p2 = (p2); \ -- 2.34.1