This patch adds intrinsic support for the XAndesVPackFPH extension. The
extension defines vector instructions that extract a pair of FP16 values from
a floating-point register, multiply the top FP16 value with the FP16 elements
of a vector operand, and add the bottom FP16 value to each product.
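
For illustration only, below is a minimal scalar sketch of the per-element
operation described above for the "t" variant. The helper name
ref_vfpmadt_elem and the assumption that the 32-bit scalar operand packs the
bottom FP16 value in its low half and the top FP16 value in its high half are
made up for this sketch; the actual packing, rounding behaviour and the "b"
variant follow the XAndesVPackFPH specification.

#include <stdint.h>
#include <string.h>

/* Hypothetical scalar reference for one element of nds.vfpmadt.vf.
   The FP16 pair packing inside the 32-bit scalar is an assumption
   made only for this sketch.  */
static _Float16
ref_vfpmadt_elem (_Float16 elem, float packed_pair)
{
  uint32_t bits;
  memcpy (&bits, &packed_pair, sizeof (bits));

  uint16_t bottom_bits = bits & 0xffff;        /* assumed bottom FP16 */
  uint16_t top_bits = (bits >> 16) & 0xffff;   /* assumed top FP16 */

  _Float16 bottom, top;
  memcpy (&bottom, &bottom_bits, sizeof (bottom));
  memcpy (&top, &top_bits, sizeof (top));

  /* Multiply the element by the top FP16 value and add the bottom one.  */
  return (_Float16) (elem * top + bottom);
}
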
gcc/ChangeLog:
* common/config/riscv/riscv-common.cc (riscv_extra_ext_flag_table):
Turn on VECTOR_ELEN_FP_16 for XAndesVPackFPH.
* config/riscv/andes-vector-builtins-bases.cc (nds_vfpmad): New class.
* config/riscv/andes-vector-builtins-bases.h: New def.
* config/riscv/andes-vector-builtins-functions.def (nds_vfpmadt): Ditto.
(nds_vfpmadb): Ditto.
(nds_vfpmadt_frm): Ditto.
(nds_vfpmadb_frm): Ditto.
* config/riscv/andes-vector.md (@pred_nds_vfpmad<nds_tb><mode>):
New pattern.
* config/riscv/riscv-vector-builtins-types.def
(DEF_RVV_F16_OPS): New def.
* config/riscv/riscv-vector-builtins.cc (f16_ops): Ditto.
* config/riscv/riscv-vector-builtins.def (float32_type_node): Ditto.
* config/riscv/riscv-vector-builtins.h (XANDESVPACKFPH_EXT): Ditto.
(required_ext_to_isa_name): Add case XANDESVPACKFPH_EXT.
(required_extensions_specified): Ditto.
* config/riscv/vector-iterators.md (VHF): New iterator.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c: New test.
* gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c: New test.
* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c: New test.
* gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c: New test.
* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c: New test.
* gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c: New test.
* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c: New test.
* gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c: New test.
---
gcc/common/config/riscv/riscv-common.cc | 3 +-
.../riscv/andes-vector-builtins-bases.cc | 25 +++
.../riscv/andes-vector-builtins-bases.h | 4 +
.../riscv/andes-vector-builtins-functions.def | 8 +
gcc/config/riscv/andes-vector.md | 32 +++
.../riscv/riscv-vector-builtins-types.def | 14 ++
gcc/config/riscv/riscv-vector-builtins.cc | 19 ++
gcc/config/riscv/riscv-vector-builtins.def | 1 +
gcc/config/riscv/riscv-vector-builtins.h | 5 +
gcc/config/riscv/vector-iterators.md | 5 +
.../non-policy/non-overloaded/nds_vfpmadb.c | 103 +++++++++
.../non-policy/non-overloaded/nds_vfpmadt.c | 103 +++++++++
.../non-policy/overloaded/nds_vfpmadb.c | 103 +++++++++
.../non-policy/overloaded/nds_vfpmadt.c | 103 +++++++++
.../policy/non-overloaded/nds_vfpmadb.c | 199 ++++++++++++++++++
.../policy/non-overloaded/nds_vfpmadt.c | 199 ++++++++++++++++++
.../policy/overloaded/nds_vfpmadb.c | 199 ++++++++++++++++++
.../policy/overloaded/nds_vfpmadt.c | 199 ++++++++++++++++++
18 files changed, 1323 insertions(+), 1 deletion(-)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index 5f192181688..9d605eaf190 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -1545,7 +1545,8 @@ static const riscv_extra_ext_flag_table_t riscv_extra_ext_flag_table[] =
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_isa_flags, MASK_FULL_V),
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("xandesvbfhcvt", x_riscv_vector_elen_flags,
MASK_VECTOR_ELEN_BF_16),
+ RISCV_EXT_FLAG_ENTRY ("xandesvbfhcvt", x_riscv_vector_elen_flags,
MASK_VECTOR_ELEN_BF_16),
+ RISCV_EXT_FLAG_ENTRY ("xandesvpackfph", x_riscv_vector_elen_flags,
MASK_VECTOR_ELEN_FP_16),
{NULL, NULL, NULL, 0}
};
diff --git a/gcc/config/riscv/andes-vector-builtins-bases.cc b/gcc/config/riscv/andes-vector-builtins-bases.cc
index 7fef63f7c94..8220111f901 100644
--- a/gcc/config/riscv/andes-vector-builtins-bases.cc
+++ b/gcc/config/riscv/andes-vector-builtins-bases.cc
@@ -106,11 +106,32 @@ public:
}
};
+template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
+class nds_vfpmad : public function_base
+{
+public:
+ bool has_rounding_mode_operand_p () const override
+ {
+ return FRM_OP == HAS_FRM;
+ }
+ bool may_require_frm_p () const override { return true; }
+
+ rtx expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_pred_nds_vfpmad (UNSPEC,
+ e.vector_mode ()));
+ }
+};
+
static CONSTEXPR const nds_vfwcvtbf16_f nds_vfwcvt_s_obj;
static CONSTEXPR const nds_vfncvtbf16_f<NO_FRM> nds_vfncvt_bf16_obj;
static CONSTEXPR const nds_vfncvtbf16_f<HAS_FRM> nds_vfncvt_bf16_frm_obj;
static CONSTEXPR const nds_nibbleload<true> nds_vln8_obj;
static CONSTEXPR const nds_nibbleload<false> nds_vlnu8_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADT, NO_FRM> nds_vfpmadt_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADB, NO_FRM> nds_vfpmadb_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADT, HAS_FRM> nds_vfpmadt_frm_obj;
+static CONSTEXPR const nds_vfpmad <UNSPEC_NDS_VFPMADB, HAS_FRM> nds_vfpmadb_frm_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
@@ -122,4 +143,8 @@ BASE (nds_vfncvt_bf16)
BASE (nds_vfncvt_bf16_frm)
BASE (nds_vln8)
BASE (nds_vlnu8)
+BASE (nds_vfpmadt)
+BASE (nds_vfpmadb)
+BASE (nds_vfpmadt_frm)
+BASE (nds_vfpmadb_frm)
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/andes-vector-builtins-bases.h b/gcc/config/riscv/andes-vector-builtins-bases.h
index b57480f6196..4b93f79aaaa 100644
--- a/gcc/config/riscv/andes-vector-builtins-bases.h
+++ b/gcc/config/riscv/andes-vector-builtins-bases.h
@@ -29,6 +29,10 @@ extern const function_base *const nds_vfncvt_bf16;
extern const function_base *const nds_vfncvt_bf16_frm;
extern const function_base *const nds_vln8;
extern const function_base *const nds_vlnu8;
+extern const function_base *const nds_vfpmadt;
+extern const function_base *const nds_vfpmadb;
+extern const function_base *const nds_vfpmadt_frm;
+extern const function_base *const nds_vfpmadb_frm;
}
} // end namespace riscv_vector
diff --git a/gcc/config/riscv/andes-vector-builtins-functions.def b/gcc/config/riscv/andes-vector-builtins-functions.def
index 8e5926be06b..5d5762a3fdb 100644
--- a/gcc/config/riscv/andes-vector-builtins-functions.def
+++ b/gcc/config/riscv/andes-vector-builtins-functions.def
@@ -48,4 +48,12 @@ DEF_RVV_FUNCTION (nds_vln8, alu, full_preds, q_v_void_const_ptr_ops)
DEF_RVV_FUNCTION (nds_vlnu8, alu, full_preds, qu_v_void_const_ptr_ops)
#undef REQUIRED_EXTENSIONS
+/* Prefix name for `__riscv_nds_`. */
+#define REQUIRED_EXTENSIONS XANDESVPACKFPH_EXT
+DEF_RVV_FUNCTION (nds_vfpmadt, alu, full_preds, f16_vvw_ops)
+DEF_RVV_FUNCTION (nds_vfpmadb, alu, full_preds, f16_vvw_ops)
+DEF_RVV_FUNCTION (nds_vfpmadt_frm, alu_frm, full_preds, f16_vvw_ops)
+DEF_RVV_FUNCTION (nds_vfpmadb_frm, alu_frm, full_preds, f16_vvw_ops)
+#undef REQUIRED_EXTENSIONS
+
#undef DEF_RVV_FUNCTION
diff --git a/gcc/config/riscv/andes-vector.md b/gcc/config/riscv/andes-vector.md
index 28bc55397c7..b2c886dcefe 100644
--- a/gcc/config/riscv/andes-vector.md
+++ b/gcc/config/riscv/andes-vector.md
@@ -21,8 +21,13 @@
UNSPEC_NDS_VFWCVTBF16
UNSPEC_NDS_VFNCVTBF16
UNSPEC_NDS_INTLOAD
+ UNSPEC_NDS_VFPMADT
+ UNSPEC_NDS_VFPMADB
])
+(define_int_iterator NDS_VFPMAD [UNSPEC_NDS_VFPMADT UNSPEC_NDS_VFPMADB])
+(define_int_attr nds_tb [(UNSPEC_NDS_VFPMADT "t") (UNSPEC_NDS_VFPMADB "b")])
+
;; ....................
;;
;; VECTOR BFLOAT16 CONVERSION
@@ -103,3 +108,30 @@
nds.vln<u>8.v\t%0,%3,%1.t"
[(set_attr "type" "vlde,vlde,vlde")
(set_attr "mode" "<MODE>")])
+
+;; Vector Packed FP16.
+
+(define_insn "@pred_nds_vfpmad<nds_tb><mode>"
+ [(set (match_operand:VHF 0 "register_operand" "=&vr, &vr")
+ (if_then_else:VHF
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1, vmWc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (match_operand 9 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)
+ (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VHF
+ [(match_operand:VHF 3 "register_operand" "vr, vr")
+ (match_operand:SF 4 "register_operand" " f, f")] NDS_VFPMAD)
+ (match_operand:VHF 2 "vector_merge_operand" "vu, 0")))]
+ "TARGET_VECTOR && TARGET_XANDESVPACKFPH"
+ "nds.vfpmad<nds_tb>.vf\t%0,%4,%3%p1"
+ [(set_attr "type" "vfmuladd")
+ (set_attr "mode" "<MODE>")
+ (set_attr "enabled" "yes")
+ (set (attr "frm_mode")
+ (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def
index d07a0baddb4..73fe1fae8f9 100644
--- a/gcc/config/riscv/riscv-vector-builtins-types.def
+++ b/gcc/config/riscv/riscv-vector-builtins-types.def
@@ -393,6 +393,12 @@ along with GCC; see the file COPYING3. If not see
#define DEF_RVV_QU_OPS(TYPE, REQUIRE)
#endif
+/* Use "DEF_RVV_F16_OPS" macro include all types for vfloat16
+ which will be iterated and registered as intrinsic functions. */
+#ifndef DEF_RVV_F16_OPS
+#define DEF_RVV_F16_OPS(TYPE, REQUIRE)
+#endif
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
@@ -1529,6 +1535,13 @@ DEF_RVV_QU_OPS (vuint8m2_t, 0)
DEF_RVV_QU_OPS (vuint8m4_t, 0)
DEF_RVV_QU_OPS (vuint8m8_t, 0)
+DEF_RVV_F16_OPS (vfloat16mf4_t, RVV_REQUIRE_ELEN_FP_16 | RVV_REQUIRE_ELEN_64)
+DEF_RVV_F16_OPS (vfloat16mf2_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m1_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m2_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m4_t, RVV_REQUIRE_ELEN_FP_16)
+DEF_RVV_F16_OPS (vfloat16m8_t, RVV_REQUIRE_ELEN_FP_16)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
@@ -1589,3 +1602,4 @@ DEF_RVV_QU_OPS (vuint8m8_t, 0)
#undef DEF_RVV_X2_WU_OPS
#undef DEF_RVV_Q_OPS
#undef DEF_RVV_QU_OPS
+#undef DEF_RVV_F16_OPS
diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc
index 0cc2dce76c6..90683b0fcd4 100644
--- a/gcc/config/riscv/riscv-vector-builtins.cc
+++ b/gcc/config/riscv/riscv-vector-builtins.cc
@@ -584,6 +584,12 @@ static const rvv_type_info qu_ops[] = {
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of FP16 types that will be registered for intrinsic functions. */
+static const rvv_type_info f16_ops[] = {
+#define DEF_RVV_F16_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
= rvv_arg_type_info (NUM_BASE_TYPES);
@@ -1241,6 +1247,12 @@ static CONSTEXPR const rvv_arg_type_info sf_vc_fvw_args[]
static CONSTEXPR const rvv_arg_type_info void_const_ptr_args[]
= {rvv_arg_type_info (RVV_BASE_void_const_ptr), rvv_arg_type_info_end};
+/* A list of args for vector_type func (vector_type, widen_lmul1_scalar)
+   function.  */
+static CONSTEXPR const rvv_arg_type_info vw_args[]
+ = {rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_float32), rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
@@ -3129,6 +3141,12 @@ static CONSTEXPR const rvv_op_info qu_v_void_const_ptr_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
void_const_ptr_args /* Args */};
+static CONSTEXPR const rvv_op_info f16_vvw_ops
+ = {f16_ops, /* Types */
+ OP_TYPE_vf, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vw_args /* Args */};
+
/* A static operand information for vector_type func (vector_type).
Some insns just supports SEW=32, such as the crypto vector Zvkg extension.
* function registration. */
@@ -3425,6 +3443,7 @@ static CONSTEXPR const function_type_info function_types[] = {
   VECTOR_TYPE_INVALID,                                               \
   VECTOR_TYPE_INVALID,                                               \
   VECTOR_TYPE_INVALID,                                               \
+  VECTOR_TYPE_INVALID,                                               \
   VECTOR_TYPE_##SIGNED_EEW8_INDEX,                                   \
   VECTOR_TYPE_##EEW8_INDEX,                                          \
   VECTOR_TYPE_##EEW16_INDEX,                                         \
diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def
index 7000e813afe..0ca98a10a7e 100644
--- a/gcc/config/riscv/riscv-vector-builtins.def
+++ b/gcc/config/riscv/riscv-vector-builtins.def
@@ -699,6 +699,7 @@ DEF_RVV_BASE_TYPE (size, size_type_node)
DEF_RVV_BASE_TYPE (ptrdiff, ptrdiff_type_node)
DEF_RVV_BASE_TYPE (unsigned_long, long_unsigned_type_node)
DEF_RVV_BASE_TYPE (long, long_integer_type_node)
+DEF_RVV_BASE_TYPE (float32, float32_type_node)
DEF_RVV_BASE_TYPE (signed_eew8_index, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (eew8_index, get_vector_type (type_idx))
DEF_RVV_BASE_TYPE (eew16_index, get_vector_type (type_idx))
diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h
index 2c0465f7f76..71dfc848985 100644
--- a/gcc/config/riscv/riscv-vector-builtins.h
+++ b/gcc/config/riscv/riscv-vector-builtins.h
@@ -133,6 +133,7 @@ enum required_ext
XSFVCP_EXT, /* XSFVCP extension*/
XANDESVBFHCVT_EXT, /* XANDESVBFHCVT extension */
XANDESVSINTLOAD_EXT, /* XANDESVSINTLOAD extension */
+ XANDESVPACKFPH_EXT, /* XANDESVPACKFPH extension */
/* Please update below to isa_name func when add or remove enum type(s). */
};
@@ -178,6 +179,8 @@ static inline const char * required_ext_to_isa_name (enum required_ext required)
return "xandesvbfhcvt";
case XANDESVSINTLOAD_EXT:
return "xandesvsintload";
+ case XANDESVPACKFPH_EXT:
+ return "xandesvpackfph";
default:
gcc_unreachable ();
}
@@ -227,6 +230,8 @@ static inline bool required_extensions_specified (enum required_ext required)
return TARGET_XANDESVBFHCVT;
case XANDESVSINTLOAD_EXT:
return TARGET_XANDESVSINTLOAD;
+ case XANDESVPACKFPH_EXT:
+ return TARGET_XANDESVPACKFPH;
default:
gcc_unreachable ();
}
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 802fa3be799..8a3815c0a57 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -5005,3 +5005,8 @@
RVVM8QI RVVM4QI RVVM2QI RVVM1QI
RVVMF2QI RVVMF4QI (RVVMF8QI "TARGET_MIN_VLEN > 32")
])
+
+(define_mode_iterator VHF [
+ RVVM8HF RVVM4HF RVVM2HF RVVM1HF RVVMF2HF
+ (RVVMF4HF "TARGET_MIN_VLEN > 32")
+])
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c
new file mode 100644
index 00000000000..decd594ceb5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadb.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb_vf_f16m1(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb_vf_f16m2(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb_vf_f16m4(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb_vf_f16m8(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_m(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_m(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_m(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_m(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_m(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_m(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadb\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c
new file mode 100644
index 00000000000..bc104ff79a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/non-overloaded/nds_vfpmadt.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt_vf_f16m1(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt_vf_f16m2(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt_vf_f16m4(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt_vf_f16m8(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_m(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_m(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_m(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_m(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_m(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_m(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE,
vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadt\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c
new file mode 100644
index 00000000000..bbb084dfae0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadb.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadb\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c
new file mode 100644
index 00000000000..1e1347ed193
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/non-policy/overloaded/nds_vfpmadt.c
@@ -0,0 +1,103 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl)
{
+ return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2,
size_t vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t
vl) {
+ return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1,
float op2, size_t vl) {
+ return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {nds\.vfpmadt\.vf\s+} 24 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c
new file mode 100644
index 00000000000..31de0dde2f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadb.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff,
vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff,
vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff,
vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff,
vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff,
vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff,
vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff,
vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff,
vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m1_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m2_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m4_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadb_vf_f16m8_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadb\.vf\s+} 48 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c
new file mode 100644
index 00000000000..0b41e9a5c88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/non-overloaded/nds_vfpmadt.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff,
vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff,
vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t
op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff,
vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff,
vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff,
vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff,
vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff,
vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff,
vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_tu(maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_tum(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t
maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t
maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t
maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m1_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t
maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m2_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t
maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m4_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t
maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+ return __riscv_nds_vfpmadt_vf_f16m8_rm_mu(mask, maskedoff, op1, op2,
__RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadt\.vf\s+} 48 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c
new file mode 100644
index 00000000000..ff79fe7aafd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadb.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target {
rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target {
rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadb\.vf\s+} 48 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c
new file mode 100644
index 00000000000..f40b54fb258
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xandesvector/policy/overloaded/nds_vfpmadt.c
@@ -0,0 +1,199 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gv_zvfh_xandesvpackfph -O3 -mabi=ilp32" { target { rv32 } } } */
+/* { dg-options "-march=rv64gv_zvfh_xandesvpackfph -O3 -mabi=lp64" { target { rv64 } } } */
+
+#include "andes_vector.h"
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+nds\.vfpmadt\.vf\s+} 48 } } */
--
2.34.1