Hi All,

Regression tested on aarch64-linux-gnu.

Okay for master?

Alfie

-- >8 --

This patch adds support for the AArch64 2024 FMMLA extensions (+f8f16mm,
+f8f32mm and +sve-f16f32mm) and their associated intrinsics.

Note that this includes a workaround for spurious binutils warnings about
movprfx followed by fmmla instructions (PR gas/33562).

gcc/ChangeLog:

        * config/aarch64/aarch64-builtins.cc (aarch64_expand_pragma_builtin):
        Add case for FMMLA.
        * config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): Define
        __ARM_FEATURE_F8F16MM, __ARM_FEATURE_F8F32MM and
        __ARM_FEATURE_SVE_F16F32MM.
        * config/aarch64/aarch64-option-extensions.def (AARCH64_OPT_EXTENSION):
        Add f8f16mm, f8f32mm, and sve-f16f32mm extensions.
        * config/aarch64/aarch64-simd-pragma-builtins.def (vmmlaq_f16_mf8):
        New intrinsic.
        (vmmlaq_f32_mf8): New intrinsic.
        * config/aarch64/aarch64-simd.md (@aarch64_<insn><V8HF_ONLY:mode>): New
        instruction.
        (@aarch64_<insn><V4SF_ONLY:mode>): New instruction.
        * config/aarch64/aarch64-sve-builtins-base.cc: Update mmla_impl for
        new instructions.
        * config/aarch64/aarch64-sve-builtins-shapes.cc (struct mmla_def): Add
        support for the new widening forms.
        * config/aarch64/aarch64-sve-builtins-sve2.def (svmmla): New
        intrinsics.
        * config/aarch64/aarch64-sve-builtins.cc (TYPES_cvt_narrow_s): Fix
        comment.
        * config/aarch64/aarch64-sve2.md (aarch64_sve_fmmlavnx8hfvnx16qi): New
        instruction.
        (aarch64_sve_fmmlavnx4sfvnx16qi): New instruction.
        (aarch64_sve_fmmlavnx4sfvnx8hf): New instruction.
        * config/aarch64/aarch64.h (TARGET_F8F32MM): New macro.
        (TARGET_F8F16MM): New macro.
        (TARGET_SVE_F16F32MM): New macro.
        * config/aarch64/iterators.md (FMMLA_UNS): New int iterator.
        (insn): Add fmmla entry.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f16_mf8.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f32_mf8.c: New test.
        * gcc.target/aarch64/sve2/acle/asm/fmmla_f8f16mm_sve2.c: New test.
        * gcc.target/aarch64/sve2/acle/asm/fmmla_f8f32mm_sve2.c: New test.
        * gcc.target/aarch64/sve2/acle/asm/fmmla_sve_f16f32mm.c: New test.
        * lib/target-supports.exp: Add f8f16mm, f8f32mm and f16f32mm-sve to
        the list of extensions checked for assembler support.
---
 gcc/config/aarch64/aarch64-builtins.cc        |  1 +
 gcc/config/aarch64/aarch64-c.cc               |  5 ++
 .../aarch64/aarch64-option-extensions.def     |  6 ++
 .../aarch64/aarch64-simd-pragma-builtins.def  | 10 +++
 gcc/config/aarch64/aarch64-simd.md            | 29 +++++++++
 .../aarch64/aarch64-sve-builtins-base.cc      | 18 +++++-
 .../aarch64/aarch64-sve-builtins-shapes.cc    | 30 ++++++---
 .../aarch64/aarch64-sve-builtins-sve2.def     | 15 +++++
 gcc/config/aarch64/aarch64-sve-builtins.cc    |  2 +-
 gcc/config/aarch64/aarch64-sve2.md            | 62 +++++++++++++++++++
 gcc/config/aarch64/aarch64.h                  |  7 +++
 gcc/config/aarch64/iterators.md               |  5 +-
 .../advsimd-intrinsics/vmmlaq_f16_mf8.c       | 27 ++++++++
 .../advsimd-intrinsics/vmmlaq_f32_mf8.c       | 32 ++++++++++
 .../sve2/acle/asm/fmmla_f8f16mm_sve2.c        | 33 ++++++++++
 .../sve2/acle/asm/fmmla_f8f32mm_sve2.c        | 33 ++++++++++
 .../sve2/acle/asm/fmmla_sve_f16f32mm.c        | 31 ++++++++++
 gcc/testsuite/lib/target-supports.exp         |  2 +-
 18 files changed, 335 insertions(+), 13 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f16_mf8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f32_mf8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f16mm_sve2.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f32mm_sve2.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_sve_f16f32mm.c

diff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc
index 408099a50e8..8e8ba035705 100644
--- a/gcc/config/aarch64/aarch64-builtins.cc
+++ b/gcc/config/aarch64/aarch64-builtins.cc
@@ -4009,6 +4009,7 @@ aarch64_expand_pragma_builtin (tree exp, rtx target,
 
     case UNSPEC_FAMAX:
     case UNSPEC_FAMIN:
+    case UNSPEC_FMMLA:
     case UNSPEC_F1CVTL_FP8:
     case UNSPEC_F2CVTL_FP8:
     case UNSPEC_FDOT_FP8:
diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
index c3957c762ef..824dd423250 100644
--- a/gcc/config/aarch64/aarch64-c.cc
+++ b/gcc/config/aarch64/aarch64-c.cc
@@ -309,6 +309,11 @@ aarch64_update_cpp_builtins (cpp_reader *pfile)
   cpp_undef (pfile, "__FLT_EVAL_METHOD_C99__");
   builtin_define_with_int_value ("__FLT_EVAL_METHOD_C99__",
                                 c_flt_eval_method (false));
+
+  aarch64_def_or_undef (TARGET_F8F16MM, "__ARM_FEATURE_F8F16MM", pfile);
+  aarch64_def_or_undef (TARGET_F8F32MM, "__ARM_FEATURE_F8F32MM", pfile);
+  aarch64_def_or_undef (TARGET_SVE_F16F32MM, "__ARM_FEATURE_SVE_F16F32MM",
+                       pfile);
 }
 
 /* Implement TARGET_CPU_CPP_BUILTINS.  */
diff --git a/gcc/config/aarch64/aarch64-option-extensions.def b/gcc/config/aarch64/aarch64-option-extensions.def
index a70375c053f..61755da91e2 100644
--- a/gcc/config/aarch64/aarch64-option-extensions.def
+++ b/gcc/config/aarch64/aarch64-option-extensions.def
@@ -279,6 +279,12 @@ AARCH64_OPT_EXTENSION("lut", LUT, (SIMD), (), (), "lut")
 
 AARCH64_OPT_EXTENSION ("sme-lutv2", SME_LUTv2, (SME2), (), (), "sme-lutv2")
 
+AARCH64_OPT_EXTENSION ("f8f16mm", F8F16MM, (SIMD, FP8), (), (), "")
+
+AARCH64_OPT_EXTENSION ("f8f32mm", F8F32MM, (SIMD, FP8), (), (), "")
+
+AARCH64_OPT_EXTENSION ("sve-f16f32mm", SVE_F16F32MM, (SVE), (), (), "")
+
 AARCH64_OPT_EXTENSION("cpa", CPA, (), (), (), "")
 
 #undef AARCH64_OPT_FMV_EXTENSION
diff --git a/gcc/config/aarch64/aarch64-simd-pragma-builtins.def b/gcc/config/aarch64/aarch64-simd-pragma-builtins.def
index 77682365103..5e7eefecca8 100644
--- a/gcc/config/aarch64/aarch64-simd-pragma-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-pragma-builtins.def
@@ -296,6 +296,16 @@ ENTRY_LOAD_LANE (vld4q_lane_mf8, mf8qx4, mf8_scalar_const_ptr, mf8qx4,
                 UNSPEC_LD4_LANE)
 #undef REQUIRED_EXTENSIONS
 
+// mmla f16 mf8
+#define REQUIRED_EXTENSIONS nonstreaming_only (TARGET_SIMD | TARGET_F8F16MM)
+ENTRY_TERNARY (vmmlaq_f16_mf8, f16q, f16q, mf8q, mf8q, UNSPEC_FMMLA, FP8)
+#undef REQUIRED_EXTENSIONS
+
+// mmla f32 mf8
+#define REQUIRED_EXTENSIONS nonstreaming_only (TARGET_SIMD | TARGET_F8F32MM)
+ENTRY_TERNARY (vmmlaq_f32_mf8, f32q, f32q, mf8q, mf8q, UNSPEC_FMMLA, FP8)
+#undef REQUIRED_EXTENSIONS
+
 // mov
 #define REQUIRED_EXTENSIONS nonstreaming_only (TARGET_SIMD)
 ENTRY_UNARY (vmov_n_mf8, mf8, mf8_scalar, UNSPEC_DUP, QUIET)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index a121a18f9a0..ce268beecb3 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -10535,3 +10535,32 @@ (define_insn "@aarch64_<insn>_lane<V4SF_ONLY:mode><VB:mode>"
     return "<insn>\t%0.<V4SF_ONLY:Vtype>, %2.16b, %3.b[%4]";
   }
 )
+
+(define_insn "@aarch64_<insn><V8HF_ONLY:mode>"
+  [(set (match_operand:V8HF 0 "register_operand")
+       (unspec:V8HF_ONLY
+        [(match_operand:V16QI 2 "register_operand")
+         (match_operand:V16QI 3 "register_operand")
+         (match_operand:V8HF 1 "register_operand")
+         (reg:DI FPM_REGNUM)]
+        FMMLA))]
+ "TARGET_F8F16MM"
+  {@ [ cons: =0 , 1 , 2 , 3 ]
+     [ w        , 0 , w , w ] <insn>\t%0.8h, %2.16b, %3.16b
+  }
+)
+
+(define_insn "@aarch64_<insn><V4SF_ONLY:mode>"
+  [(set (match_operand:V4SF 0 "register_operand")
+       (unspec:V4SF_ONLY
+        [(match_operand:V16QI 2 "register_operand")
+         (match_operand:V16QI 3 "register_operand")
+         (match_operand:V4SF 1 "register_operand")
+         (reg:DI FPM_REGNUM)]
+        FMMLA))]
+  "TARGET_F8F32MM"
+  {@ [ cons: =0 , 1 , 2 , 3 ]
+     [ w        , 0 , w , w ] <insn>\t%0.4s, %2.16b, %3.16b
+  }
+)
+
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index ecc06877cac..393499aac7c 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -2283,7 +2283,23 @@ public:
          icode = code_for_aarch64_sve_add (UNSPEC_SMATMUL, e.vector_mode (0));
       }
     else
-      icode = code_for_aarch64_sve (UNSPEC_FMMLA, e.vector_mode (0));
+      {
+       if (e.type_suffix (0).element_bits == 16
+           && e.type_suffix (1).element_bits == 8)
+         icode = CODE_FOR_aarch64_sve_fmmlavnx8hfvnx16qi;
+       else if (e.type_suffix (0).element_bits == 32
+                && e.type_suffix (1).element_bits == 8)
+         icode = CODE_FOR_aarch64_sve_fmmlavnx4sfvnx16qi;
+       else if (e.type_suffix (0).element_bits == 32
+                && e.type_suffix (1).element_bits == 16)
+         icode = CODE_FOR_aarch64_sve_fmmlavnx4sfvnx8hf;
+       else
+         {
+           gcc_assert (e.type_suffix (0).element_bits
+                       == e.type_suffix (1).element_bits);
+           icode = code_for_aarch64_sve (UNSPEC_FMMLA, e.vector_mode (0));
+         }
+      }
     return e.use_exact_insn (icode);
   }
 };
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
index b315dc91cc7..852d5e53e4e 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
@@ -1019,7 +1019,8 @@ template <unsigned int BITS> struct luti_zt_base : public nonoverloaded_base
 
 /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t,
                       sv<t0:quarter>_t)  (for integer t0)
-   sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)  (for floating-point t0)
+   sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t1>_t, sv<t1>_t)
+                      (for floating-point t0, t1)
 
    The functions act like the equivalent of "ternary_qq" for integer elements
    and normal vector-only ternary functions for floating-point elements.  */
@@ -1030,7 +1031,12 @@ struct mmla_def : public overloaded_base<0>
   {
     b.add_overloaded_functions (group, MODE_none);
     if (type_suffixes[group.types[0][0]].float_p)
-      build_all (b, "v0,v0,v0,v0", group, MODE_none);
+      {
+       if (group.types[0][1] == NUM_TYPE_SUFFIXES)
+         build_all (b, "v0,v0,v0,v0", group, MODE_none);
+       else
+         build_all (b, "v0,v0,v1,v1", group, MODE_none);
+      }
     else
       build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
   }
@@ -1039,23 +1045,29 @@ struct mmla_def : public overloaded_base<0>
   resolve (function_resolver &r) const override
   {
     unsigned int i, nargs;
-    type_suffix_index type;
-    if (!r.check_gp_argument (3, i, nargs)
-       || (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
+    type_suffix_index type1;
+    type_suffix_index type2;
+    if (!r.check_gp_argument (3, i, nargs))
+      return error_mark_node;
+
+    if ((type1 = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
+       || (type2 = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
       return error_mark_node;
 
     /* Make sure that the function exists now, since not all forms
        follow a set pattern after this point.  */
-    tree res = r.resolve_to (r.mode_suffix_id, type);
+    tree res = type1 == type2 ? r.resolve_to (r.mode_suffix_id, type1)
+                             : r.resolve_to (r.mode_suffix_id, type1, type2);
     if (res == error_mark_node)
       return res;
 
-    bool float_p = type_suffixes[type].float_p;
+    bool float_p = type_suffixes[type1].float_p;
     unsigned int modifier = float_p ? r.SAME_SIZE : r.QUARTER_SIZE;
+    type_suffix_index type = float_p ? type2 : type1;
     if (!r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
                                        modifier)
-       || !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
-                                          modifier))
+       || !r.require_derived_vector_type (i + 2, i + 1, type,
+                                          r.SAME_TYPE_CLASS, modifier))
       return error_mark_node;
 
     return res;
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
index b622fe33458..a59a5e2babc 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
@@ -418,3 +418,18 @@ DEF_SVE_FUNCTION_GS_FPM (svdot_lane, ternary_mfloat8_lane_group_selection, s_flo
 DEF_SVE_FUNCTION_GS_FPM (svdot, ternary_mfloat8, h_float_mf8, none, none, set)
 DEF_SVE_FUNCTION_GS_FPM (svdot_lane, ternary_mfloat8_lane_group_selection, h_float_mf8, none, none, set)
 #undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS \
+  nonstreaming_sve (AARCH64_FL_SVE2 | AARCH64_FL_F8F16MM)
+DEF_SVE_FUNCTION_GS_FPM (svmmla, mmla, h_float_mf8, none, none, set)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS \
+  nonstreaming_sve (AARCH64_FL_SVE2 | AARCH64_FL_F8F32MM)
+DEF_SVE_FUNCTION_GS_FPM (svmmla, mmla, s_float_mf8, none, none, set)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS \
+  nonstreaming_sve (AARCH64_FL_SVE_F16F32MM)
+DEF_SVE_FUNCTION_GS (svmmla, mmla, cvt_f32_f16, none, none)
+#undef REQUIRED_EXTENSIONS
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc
index b2b03dc8cea..b0e262a7c2d 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc
@@ -478,7 +478,7 @@ CONSTEXPR const group_suffix_info group_suffixes[] = {
 #define TYPES_cvt_long(S, D) \
   D (f32, f16), D (f64, f32)
 
-/* _f16_f32.  */
+/* _f32_f64.  */
 #define TYPES_cvt_narrow_s(S, D) \
   D (f32, f64)
 
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 91091835182..1d97a98207e 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -134,6 +134,9 @@
 ;; ---- Optional AES extensions
 ;; ---- Optional SHA-3 extensions
 ;; ---- Optional SM4 extensions
+;;
+;; == FMMLA extensions
+;; ---- [FP] Matrix multiply-accumulate widening
 
 ;; =========================================================================
 ;; == Moves
@@ -4608,3 +4611,62 @@ (define_insn "aarch64_sve2_sm4ekey"
   "sm4ekey\t%0.s, %1.s, %2.s"
   [(set_attr "type" "crypto_sm4")]
 )
+
+;; =========================================================================
+;; == FMMLA extensions
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Matrix multiply-accumulate widening
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - FMMLA (F8F16MM,F8F32MM,SVE_F16F32MM)
+;; -------------------------------------------------------------------------
+
+(define_insn "aarch64_sve_fmmlavnx8hfvnx16qi"
+  [(set (match_operand:VNx8HF 0 "register_operand")
+       (unspec:VNx8HF
+         [(match_operand:VNx16QI 2 "register_operand")
+          (match_operand:VNx16QI 3 "register_operand")
+          (match_operand:VNx8HF 1 "register_operand")
+          (reg:DI FPM_REGNUM)]
+         FMMLA))]
+  "TARGET_SVE2 && TARGET_F8F16MM"
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] fmmla\t%0.h, %2.b, %3.b
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;fmmla\t%0.h, %2.b, %3.b
+  }
+  [(set_attr "sve_type" "sve_fp_mul")]
+)
+
+(define_insn "aarch64_sve_fmmlavnx4sfvnx16qi"
+  [(set (match_operand:VNx4SF 0 "register_operand")
+       (unspec:VNx4SF
+         [(match_operand:VNx16QI 2 "register_operand")
+          (match_operand:VNx16QI 3 "register_operand")
+          (match_operand:VNx4SF 1 "register_operand")
+          (reg:DI FPM_REGNUM)]
+         FMMLA))]
+  "TARGET_SVE2 && TARGET_F8F32MM"
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] fmmla\t%0.s, %2.b, %3.b
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;fmmla\t%0.s, %2.b, %3.b
+  }
+  [(set_attr "sve_type" "sve_fp_mul")]
+)
+
+(define_insn "aarch64_sve_fmmlavnx4sfvnx8hf"
+  [(set (match_operand:VNx4SF 0 "register_operand")
+       (unspec:VNx4SF
+        [(match_operand:VNx8HF 2 "register_operand")
+         (match_operand:VNx8HF 3 "register_operand")
+         (match_operand:VNx4SF 1 "register_operand")]
+        FMMLA))]
+  "TARGET_SVE_F16F32MM"
+  {@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
+     [ w        , 0 , w , w ; *              ] fmmla\t%0.s, %2.h, %3.h
+     [ ?&w      , w , w , w ; yes            ] movprfx\t%0, %1\;fmmla\t%0.s, %2.h, %3.h
+  }
+  [(set_attr "sve_type" "sve_fp_mul")]
+)
+
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 2cd929d83f9..2fef81f0dcd 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -392,6 +392,13 @@ constexpr auto AARCH64_FL_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
    but are incompatible with -mtrack-speculation. */
 #define TARGET_CMPBR (AARCH64_HAVE_ISA (CMPBR) && !aarch64_track_speculation)
 
+/* F8F32MM instructions, enabled through +f8f32mm.  */
+#define TARGET_F8F32MM (AARCH64_HAVE_ISA (F8F32MM))
+/* F8F16MM instructions, enabled through +f8f16mm.  */
+#define TARGET_F8F16MM (AARCH64_HAVE_ISA (F8F16MM))
+/* SVE_F16F32MM instructions, enabled through +sve-f16f32mm.  */
+#define TARGET_SVE_F16F32MM (AARCH64_HAVE_ISA (SVE_F16F32MM))
+
 /* Make sure this is always defined so we don't have to check for ifdefs
    but rather use normal ifs.  */
 #ifndef TARGET_FIX_ERR_A53_835769_DEFAULT
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 517b2808b5f..e2cdfee308c 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -4014,6 +4014,8 @@ (define_int_iterator FSCALE_UNS [UNSPEC_FSCALE])
 (define_int_iterator FPM_FDOT [UNSPEC_FDOT_FP8])
 (define_int_iterator FPM_FDOT_LANE [UNSPEC_FDOT_LANE_FP8])
 
+(define_int_iterator FMMLA_UNS [UNSPEC_FMMLA])
+
 ;; -------------------------------------------------------------------
 ;; Int Iterators Attributes.
 ;; -------------------------------------------------------------------
@@ -4031,7 +4033,8 @@ (define_int_attr insn
    (UNSPEC_FMLALLBT_FP8 "fmlallbt")
    (UNSPEC_FMLALLTB_FP8 "fmlalltb")
    (UNSPEC_FMLALLTT_FP8 "fmlalltt")
-   (UNSPEC_FSCALE "fscale")])
+   (UNSPEC_FSCALE "fscale")
+   (UNSPEC_FMMLA "fmmla")])
 
 ;; The optab associated with an operation.  Note that for ANDF, IORF
 ;; and XORF, the optab pattern is not actually defined; we just use this
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f16_mf8.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f16_mf8.c
new file mode 100644
index 00000000000..a23266681ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f16_mf8.c
@@ -0,0 +1,27 @@
+/* { dg-do assemble { target aarch64_asm_f8f16mm_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_f8f16mm_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+simd+f8f16mm"
+
+/*
+** svmmla_f16f8mm_tied:
+**     msr     fpmr, x0
+**     fmmla   z0\.h, z4\.b, z5\.b
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f16f8mm_tied, svfloat16_t, svmfloat8_t,
+            z0 = svmmla_f16_mf8_fpm (z0, z4, z5, fpm0),
+            z0 = svmmla_fpm (z0, z4, z5, fpm0))
+
+/*
+** svmmla_f16f8mm:
+**     msr     fpmr, x0
+**     movprfx z0, z1
+**     fmmla   z0\.h, z4\.b, z5\.b
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f16f8mm, svfloat16_t, svmfloat8_t,
+            z0 = svmmla_f16_mf8_fpm (z1, z4, z5, fpm0),
+            z0 = svmmla_fpm (z1, z4, z5, fpm0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f32_mf8.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f32_mf8.c
new file mode 100644
index 00000000000..7ed363cbc9b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vmmlaq_f32_mf8.c
@@ -0,0 +1,32 @@
+/* { dg-do assemble { target aarch64_asm_f8f32mm_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_f8f32mm_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+simd+f8f32mm"
+
+#include <arm_neon.h>
+
+/*
+** fmmla_f32f8mm_tied:
+**     msr     fpmr, x0
+**     fmmla   v0\.s, v4\.b, v5\.b
+**     ret
+*/
+float32x4_t
+fmmla_f32f8mm_tied (float32x4_t z0, mfloat8x16_t z1, mfloat8x16_t z2, fpm_t fpm0)
+{
+  return vmmlaq_f32_mf8 (z0, z1, z2, fpm0);
+}
+
+/*
+** fmmla_f32f8mm:
+**     msr     fpmr, x0
+**     mov     v0, v1
+**     fmmla   v0\.s, v4\.b, v5\.b
+**     ret
+*/
+float32x4_t
+fmmla_f32f8mm (float32x4_t z0, float32x4_t z1, mfloat8x16_t z2, mfloat8x16_t z3, fpm_t fpm0)
+{
+  return vmmlaq_f32_mf8 (z1, z2, z3, fpm0);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f16mm_sve2.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f16mm_sve2.c
new file mode 100644
index 00000000000..9a333ff4b1d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f16mm_sve2.c
@@ -0,0 +1,33 @@
+/* { dg-do assemble { target aarch64_asm_f8f16mm_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_f8f16mm_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+/* Binutils PR gas/33562 */
+/* { dg-prune-output "SVE `movprfx' compatible instruction expected" } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2+f8f16mm"
+
+/*
+** svmmla_f16f8mm_tied:
+**     msr     fpmr, x0
+**     fmmla   z0\.h, z4\.b, z5\.b
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f16f8mm_tied, svfloat16_t, svmfloat8_t,
+            z0 = svmmla_f16_mf8_fpm (z0, z4, z5, fpm0),
+            z0 = svmmla_fpm (z0, z4, z5, fpm0))
+
+/*
+** svmmla_f16f8mm:
+**     msr     fpmr, x0
+**     movprfx z0, z1
+**     fmmla   z0\.h, z4\.b, z5\.b
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f16f8mm, svfloat16_t, svmfloat8_t,
+            z0 = svmmla_f16_mf8_fpm (z1, z4, z5, fpm0),
+            z0 = svmmla_fpm (z1, z4, z5, fpm0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f32mm_sve2.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f32mm_sve2.c
new file mode 100644
index 00000000000..edd8cef3a12
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_f8f32mm_sve2.c
@@ -0,0 +1,33 @@
+/* { dg-do assemble { target aarch64_asm_f8f32mm_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_f8f32mm_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+/* Binutils PR gas/33562 */
+/* { dg-prune-output "SVE `movprfx' compatible instruction expected" } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2+f8f32mm"
+
+/*
+** svmmla_f32f8mm_tied:
+**     msr     fpmr, x0
+**     fmmla   z0\.s, z4\.b, z5\.b
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f32f8mm_tied, svfloat32_t, svmfloat8_t,
+            z0 = svmmla_f32_mf8_fpm (z0, z4, z5, fpm0),
+            z0 = svmmla_fpm (z0, z4, z5, fpm0))
+
+/*
+** svmmla_f32f8mm:
+**     msr     fpmr, x0
+**     movprfx z0, z1
+**     fmmla   z0\.s, z4\.b, z5\.b
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f32f8mm, svfloat32_t, svmfloat8_t,
+            z0 = svmmla_f32_mf8_fpm (z1, z4, z5, fpm0),
+            z0 = svmmla_fpm (z1, z4, z5, fpm0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_sve_f16f32mm.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_sve_f16f32mm.c
new file mode 100644
index 00000000000..bcc86decc97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/fmmla_sve_f16f32mm.c
@@ -0,0 +1,31 @@
+/* { dg-do assemble { target aarch64_asm_f16f32mm-sve_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_f16f32mm-sve_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+/* Binutils PR gas/33562 */
+/* { dg-prune-output "SVE `movprfx' compatible instruction expected" } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve-f16f32mm"
+
+/*
+** svmmla_f32f16mm_tied:
+**     fmmla   z0\.s, z4\.h, z5\.h
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f32f16mm_tied, svfloat32_t, svfloat16_t,
+            z0 = svmmla_f32_f16 (z0, z4, z5),
+            z0 = svmmla (z0, z4, z5))
+
+/*
+** svmmla_f32f16mm:
+**     movprfx z0, z1
+**     fmmla   z0\.s, z4\.h, z5\.h
+**     ret
+*/
+TEST_DUAL_Z (svmmla_f32f16mm, svfloat32_t, svfloat16_t,
+            z0 = svmmla_f32_f16 (z1, z4, z5),
+            z0 = svmmla (z1, z4, z5))
+
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 2d45ddaadc0..fa3adf94e3e 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -12535,7 +12535,7 @@ proc check_effective_target_aarch64_gas_has_build_attributes { } {
 set exts {
     "bf16" "cmpbr" "crc" "crypto" "dotprod" "f32mm" "f64mm" "faminmax"
     "fp" "fp8" "fp8dot2" "fp8dot4" "fp8fma" "i8mm" "ls64" "lse" "lut"
-    "sb" "simd" "sve-b16b16" "sve" "sve2"
+    "sb" "simd" "sve-b16b16" "sve" "sve2" "f8f16mm" "f8f32mm" "f16f32mm-sve"
 }
 
 # We don't support SME without SVE2, so we'll use armv9 as the base
-- 
2.34.1

