I committed the following patch, which implements svmulh, to the
aarch64/sve-acle-branch branch.
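
For reference, svmulh computes the most significant half of the
doubled-width product of each pair of active elements, mapping to the
SMULH/UMULH instructions matched by the .md patterns below.  A minimal
usage sketch (the function name is just for illustration; the expected
code is the same sequence checked by the new mulh_s16.c test):

  #include <arm_sve.h>

  /* Each active lane of the result holds bits [31:16] of the 32-bit
     product of the corresponding lanes of A and B; inactive lanes
     keep the value of A under _m predication.  */
  svint16_t
  mulh_example (svbool_t pg, svint16_t a, svint16_t b)
  {
    return svmulh_s16_m (pg, a, b); /* smulh z0.h, p0/m, z0.h, z1.h */
  }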

Thanks,
Kugan
From 33b76de8ef5f370dfacba0addef2fe0b1f2a61db Mon Sep 17 00:00:00 2001
From: Kugan Vivekanandarajah <kugan.vivekanandarajah@linaro.org>
Date: Fri, 18 Jan 2019 07:33:26 +1100
Subject: [PATCH] [SVE ACLE] Implements svmulh

Change-Id: Iaf4bd9898f46a53950e574750f68bdc709adbc1d
---
 gcc/config/aarch64/aarch64-sve-builtins.c          |  14 ++
 gcc/config/aarch64/aarch64-sve.md                  |  50 +++-
 gcc/config/aarch64/iterators.md                    |   2 +
 .../gcc.target/aarch64/sve-acle/asm/mulh_s16.c     | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_s32.c     | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_s64.c     | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_s8.c      | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_u16.c     | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_u32.c     | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_u64.c     | 254 +++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/mulh_u8.c      | 254 +++++++++++++++++++++
 gcc/tree-core.h                                    |   8 +-
 12 files changed, 2101 insertions(+), 5 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u8.c

diff --git a/gcc/config/aarch64/aarch64-sve-builtins.c b/gcc/config/aarch64/aarch64-sve-builtins.c
index c039ceb..b1deee9 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.c
+++ b/gcc/config/aarch64/aarch64-sve-builtins.c
@@ -169,6 +169,7 @@ enum function {
   FUNC_svmls,
   FUNC_svmsb,
   FUNC_svmul,
+  FUNC_svmulh,
   FUNC_svneg,
   FUNC_svnot,
   FUNC_svptrue,
@@ -463,6 +464,7 @@ private:
   rtx expand_mla ();
   rtx expand_mls ();
   rtx expand_mul ();
+  rtx expand_mulh ();
   rtx expand_neg ();
   rtx expand_not ();
   rtx expand_ptrue ();
@@ -1088,6 +1090,7 @@ arm_sve_h_builder::get_attributes (const function_instance &instance)
     case FUNC_svmls:
     case FUNC_svmsb:
     case FUNC_svmul:
+    case FUNC_svmulh:
     case FUNC_svneg:
     case FUNC_svnot:
     case FUNC_svqadd:
@@ -1700,6 +1703,7 @@ gimple_folder::fold ()
     case FUNC_svmls:
     case FUNC_svmsb:
     case FUNC_svmul:
+    case FUNC_svmulh:
     case FUNC_svneg:
     case FUNC_svnot:
     case FUNC_svqadd:
@@ -1808,6 +1812,9 @@ function_expander::expand ()
     case FUNC_svmul:
       return expand_mul ();
 
+    case FUNC_svmulh:
+      return expand_mulh ();
+
     case FUNC_svneg:
       return expand_neg ();
 
@@ -2033,6 +2040,13 @@ function_expander::expand_mul ()
     return expand_via_pred_direct_optab (cond_smul_optab);
 }
 
+/* Expand a call to svmulh.  */
+rtx
+function_expander::expand_mulh ()
+{
+  return expand_signed_pred_op (UNSPEC_SMUL_HIGHPART, UNSPEC_UMUL_HIGHPART, 0);
+}
+
 /* Expand a call to svneg.  */
 rtx
 function_expander::expand_neg ()
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 944de82..6944d2b 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1210,7 +1210,7 @@
 )
 
 ;; Predicated highpart multiplication.
-(define_insn "*<su>mul<mode>3_highpart"
+(define_insn "@aarch64_pred_<optab><mode>"
   [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
 	(unspec:SVE_I
 	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
@@ -1225,6 +1225,54 @@
   [(set_attr "movprfx" "*,yes")]
 )
 
+;; Predicated MULH with select.
+(define_expand "@cond_<optab><mode>"
+  [(set (match_operand:SVE_I 0 "register_operand")
+	(unspec:SVE_I
+	  [(match_operand:<VPRED> 1 "register_operand")
+	   (unspec:SVE_I
+	     [(match_operand:SVE_I 2 "register_operand")
+	      (match_operand:SVE_I 3 "register_operand")]
+	     MUL_HIGHPART)
+	   (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+	  UNSPEC_SEL))]
+  "TARGET_SVE"
+)
+
+;; Predicated MULH with select matching the first input.
+(define_insn "*cond_<optab><mode>_2"
+  [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
+	(unspec:SVE_I
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+	   (unspec:SVE_I
+	     [(match_operand:SVE_I 2 "register_operand" "0, w")
+	      (match_operand:SVE_I 3 "register_operand" "w, w")]
+	     MUL_HIGHPART)
+	   (match_dup 2)]
+	  UNSPEC_SEL))]
+  "TARGET_SVE"
+  "@
+   <su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   movprfx\t%0, %2\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  [(set_attr "movprfx" "*,yes")])
+
+;; Predicated MULH with select matching zero.
+(define_insn "*cond_<optab><mode>_z"
+  [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w")
+	(unspec:SVE_I
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+	   (unspec:SVE_I
+	     [(match_operand:SVE_I 2 "register_operand" "%0, w")
+	      (match_operand:SVE_I 3 "register_operand" "w, w")]
+	     MUL_HIGHPART)
+	   (match_operand:SVE_I 4 "aarch64_simd_imm_zero")]
+	  UNSPEC_SEL))]
+  "TARGET_SVE"
+  "@
+   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<su>mulh\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  [(set_attr "movprfx" "yes")])
+
 ;; Unpredicated division.
 (define_expand "<optab><mode>3"
   [(set (match_operand:SVE_SDI 0 "register_operand")
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 1a3f539..fe1a92f 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1629,6 +1629,8 @@
 			(UNSPEC_COND_FNEG "neg")
 			(UNSPEC_COND_FSQRT "sqrt")
 			(UNSPEC_COND_MUL "mul")
+		        (UNSPEC_SMUL_HIGHPART "smulh")
+		        (UNSPEC_UMUL_HIGHPART "umulh")
 			(UNSPEC_COND_DIV "div")
 			(UNSPEC_COND_FMAX "smax_nan")
 			(UNSPEC_COND_FMIN "smin_nan")
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s16.c
new file mode 100644
index 0000000..f526b76
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s16.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_s16_m_tied1:
+**	smulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_m_tied1, svint16_t,
+		z0 = svmulh_s16_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_s16_m_tied2, svint16_t,
+		z1 = svmulh_s16_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_s16_m_untied:
+**	movprfx	z0, z1
+**	smulh	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_m_untied, svint16_t,
+		z0 = svmulh_s16_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_w0_s16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s16_m_tied1, svint16_t, int16_t,
+		 z0 = svmulh_n_s16_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_w0_s16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s16_m_untied, svint16_t, int16_t,
+		 z0 = svmulh_n_s16_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_h0_s16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	smulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_s16_m_tied1, svint16_t, int16_t,
+		 z1 = svmulh_n_s16_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_h0_s16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	smulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_s16_m_untied, svint16_t, int16_t,
+		 z1 = svmulh_n_s16_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_s16_m_tied1:
+**	mov	(z[0-9]+\.h), #2
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s16_m_tied1, svint16_t,
+		z0 = svmulh_n_s16_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_s16_m_untied:
+**	mov	(z[0-9]+\.h), #2
+**	movprfx	z0, z1
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s16_m_untied, svint16_t,
+		z0 = svmulh_n_s16_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_s16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_z_tied1, svint16_t,
+		z0 = svmulh_s16_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	smulh	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_z_tied2, svint16_t,
+		z1 = svmulh_s16_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smulh	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_z_untied, svint16_t,
+		z0 = svmulh_s16_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_w0_s16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s16_z_tied1, svint16_t, int16_t,
+		 z0 = svmulh_n_s16_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_w0_s16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, \1
+**	smulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s16_z_untied, svint16_t, int16_t,
+		 z0 = svmulh_n_s16_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_h0_s16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	smulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_s16_z_tied1, svint16_t, int16_t,
+		 z1 = svmulh_n_s16_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_h0_s16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, \1
+**	smulh	z1\.h, p0/m, z1\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_s16_z_untied, svint16_t, int16_t,
+		 z1 = svmulh_n_s16_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_s16_x_tied1:
+**	smulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_x_tied1, svint16_t,
+		z0 = svmulh_s16_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s16_x_tied2:
+**	smulh	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_x_tied2, svint16_t,
+		z1 = svmulh_s16_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s16_x_untied:
+**	movprfx	z2, z0
+**	smulh	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s16_x_untied, svint16_t,
+		z2 = svmulh_s16_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_w0_s16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s16_x_tied1, svint16_t, int16_t,
+		 z0 = svmulh_n_s16_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_w0_s16_x_untied:
+**	mov	z1\.h, w0
+**	smulh	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s16_x_untied, svint16_t, int16_t,
+		 z1 = svmulh_n_s16_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_h0_s16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	smulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_s16_x_tied1, svint16_t, int16_t,
+		 z1 = svmulh_n_s16_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_h0_s16_x_untied:
+**	mov	z2\.h, h0
+**	smulh	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_s16_x_untied, svint16_t, int16_t,
+		 z2 = svmulh_n_s16_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_s16_x_tied1:
+**	mov	(z[0-9]+\.h), #2
+**	smulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s16_x_tied1, svint16_t,
+		z0 = svmulh_n_s16_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_s16_x_untied:
+**	mov	z0\.h, #2
+**	smulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s16_x_untied, svint16_t,
+		z0 = svmulh_n_s16_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s32.c
new file mode 100644
index 0000000..68f4169
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s32.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_s32_m_tied1:
+**	smulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_m_tied1, svint32_t,
+		z0 = svmulh_s32_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_s32_m_tied2, svint32_t,
+		z1 = svmulh_s32_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_s32_m_untied:
+**	movprfx	z0, z1
+**	smulh	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_m_untied, svint32_t,
+		z0 = svmulh_s32_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_w0_s32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s32_m_tied1, svint32_t, int32_t,
+		 z0 = svmulh_n_s32_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_w0_s32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s32_m_untied, svint32_t, int32_t,
+		 z0 = svmulh_n_s32_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_s0_s32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	smulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_s32_m_tied1, svint32_t, int32_t,
+		 z1 = svmulh_n_s32_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_s0_s32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	smulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_s32_m_untied, svint32_t, int32_t,
+		 z1 = svmulh_n_s32_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_s32_m_tied1:
+**	mov	(z[0-9]+\.s), #2
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s32_m_tied1, svint32_t,
+		z0 = svmulh_n_s32_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_s32_m_untied:
+**	mov	(z[0-9]+\.s), #2
+**	movprfx	z0, z1
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s32_m_untied, svint32_t,
+		z0 = svmulh_n_s32_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_s32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_z_tied1, svint32_t,
+		z0 = svmulh_s32_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	smulh	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_z_tied2, svint32_t,
+		z1 = svmulh_s32_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smulh	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_z_untied, svint32_t,
+		z0 = svmulh_s32_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_w0_s32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s32_z_tied1, svint32_t, int32_t,
+		 z0 = svmulh_n_s32_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_w0_s32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, \1
+**	smulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s32_z_untied, svint32_t, int32_t,
+		 z0 = svmulh_n_s32_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_s0_s32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	smulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_s32_z_tied1, svint32_t, int32_t,
+		 z1 = svmulh_n_s32_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_s0_s32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, \1
+**	smulh	z1\.s, p0/m, z1\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_s32_z_untied, svint32_t, int32_t,
+		 z1 = svmulh_n_s32_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_s32_x_tied1:
+**	smulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_x_tied1, svint32_t,
+		z0 = svmulh_s32_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s32_x_tied2:
+**	smulh	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_x_tied2, svint32_t,
+		z1 = svmulh_s32_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s32_x_untied:
+**	movprfx	z2, z0
+**	smulh	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s32_x_untied, svint32_t,
+		z2 = svmulh_s32_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_w0_s32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s32_x_tied1, svint32_t, int32_t,
+		 z0 = svmulh_n_s32_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_w0_s32_x_untied:
+**	mov	z1\.s, w0
+**	smulh	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s32_x_untied, svint32_t, int32_t,
+		 z1 = svmulh_n_s32_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_s0_s32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	smulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_s32_x_tied1, svint32_t, int32_t,
+		 z1 = svmulh_n_s32_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_s0_s32_x_untied:
+**	mov	z2\.s, s0
+**	smulh	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_s32_x_untied, svint32_t, int32_t,
+		 z2 = svmulh_n_s32_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_s32_x_tied1:
+**	mov	(z[0-9]+\.s), #2
+**	smulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s32_x_tied1, svint32_t,
+		z0 = svmulh_n_s32_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_s32_x_untied:
+**	mov	z0\.s, #2
+**	smulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s32_x_untied, svint32_t,
+		z0 = svmulh_n_s32_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s64.c
new file mode 100644
index 0000000..3955a7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s64.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_s64_m_tied1:
+**	smulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_m_tied1, svint64_t,
+		z0 = svmulh_s64_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_s64_m_tied2, svint64_t,
+		z1 = svmulh_s64_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_s64_m_untied:
+**	movprfx	z0, z1
+**	smulh	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_m_untied, svint64_t,
+		z0 = svmulh_s64_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_x0_s64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_s64_m_tied1, svint64_t, int64_t,
+		 z0 = svmulh_n_s64_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_x0_s64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_s64_m_untied, svint64_t, int64_t,
+		 z0 = svmulh_n_s64_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_d0_s64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	smulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_s64_m_tied1, svint64_t, int64_t,
+		 z1 = svmulh_n_s64_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_d0_s64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	smulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_s64_m_untied, svint64_t, int64_t,
+		 z1 = svmulh_n_s64_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_s64_m_tied1:
+**	mov	(z[0-9]+\.d), #2
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s64_m_tied1, svint64_t,
+		z0 = svmulh_n_s64_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_s64_m_untied:
+**	mov	(z[0-9]+\.d), #2
+**	movprfx	z0, z1
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s64_m_untied, svint64_t,
+		z0 = svmulh_n_s64_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_s64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_z_tied1, svint64_t,
+		z0 = svmulh_s64_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	smulh	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_z_tied2, svint64_t,
+		z1 = svmulh_s64_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smulh	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_z_untied, svint64_t,
+		z0 = svmulh_s64_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_x0_s64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_s64_z_tied1, svint64_t, int64_t,
+		 z0 = svmulh_n_s64_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_x0_s64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, \1
+**	smulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_s64_z_untied, svint64_t, int64_t,
+		 z0 = svmulh_n_s64_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_d0_s64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	smulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_s64_z_tied1, svint64_t, int64_t,
+		 z1 = svmulh_n_s64_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_d0_s64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, \1
+**	smulh	z1\.d, p0/m, z1\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_s64_z_untied, svint64_t, int64_t,
+		 z1 = svmulh_n_s64_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_s64_x_tied1:
+**	smulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_x_tied1, svint64_t,
+		z0 = svmulh_s64_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s64_x_tied2:
+**	smulh	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_x_tied2, svint64_t,
+		z1 = svmulh_s64_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s64_x_untied:
+**	movprfx	z2, z0
+**	smulh	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s64_x_untied, svint64_t,
+		z2 = svmulh_s64_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_x0_s64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_s64_x_tied1, svint64_t, int64_t,
+		 z0 = svmulh_n_s64_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_x0_s64_x_untied:
+**	mov	z1\.d, x0
+**	smulh	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_s64_x_untied, svint64_t, int64_t,
+		 z1 = svmulh_n_s64_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_d0_s64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	smulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_s64_x_tied1, svint64_t, int64_t,
+		 z1 = svmulh_n_s64_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_d0_s64_x_untied:
+**	mov	z2\.d, d0
+**	smulh	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_s64_x_untied, svint64_t, int64_t,
+		 z2 = svmulh_n_s64_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_s64_x_tied1:
+**	mov	(z[0-9]+\.d), #2
+**	smulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s64_x_tied1, svint64_t,
+		z0 = svmulh_n_s64_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_s64_x_untied:
+**	mov	z0\.d, #2
+**	smulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s64_x_untied, svint64_t,
+		z0 = svmulh_n_s64_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s8.c
new file mode 100644
index 0000000..5eba6b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_s8.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_s8_m_tied1:
+**	smulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_m_tied1, svint8_t,
+		z0 = svmulh_s8_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_s8_m_tied2, svint8_t,
+		z1 = svmulh_s8_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_s8_m_untied:
+**	movprfx	z0, z1
+**	smulh	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_m_untied, svint8_t,
+		z0 = svmulh_s8_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_w0_s8_m_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s8_m_tied1, svint8_t, int8_t,
+		 z0 = svmulh_n_s8_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_w0_s8_m_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0, z1
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s8_m_untied, svint8_t, int8_t,
+		 z0 = svmulh_n_s8_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_b0_s8_m_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	smulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_s8_m_tied1, svint8_t, int8_t,
+		 z1 = svmulh_n_s8_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_b0_s8_m_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1, z2
+**	smulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_s8_m_untied, svint8_t, int8_t,
+		 z1 = svmulh_n_s8_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_s8_m_tied1:
+**	mov	(z[0-9]+\.b), #2
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s8_m_tied1, svint8_t,
+		z0 = svmulh_n_s8_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_s8_m_untied:
+**	mov	(z[0-9]+\.b), #2
+**	movprfx	z0, z1
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s8_m_untied, svint8_t,
+		z0 = svmulh_n_s8_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_s8_z_tied1:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_z_tied1, svint8_t,
+		z0 = svmulh_s8_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s8_z_tied2:
+**	movprfx	z1\.b, p0/z, z1\.b
+**	smulh	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_z_tied2, svint8_t,
+		z1 = svmulh_s8_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_s8_z_untied:
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smulh	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_z_untied, svint8_t,
+		z0 = svmulh_s8_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_w0_s8_z_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s8_z_tied1, svint8_t, int8_t,
+		 z0 = svmulh_n_s8_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_w0_s8_z_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, \1
+**	smulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s8_z_untied, svint8_t, int8_t,
+		 z0 = svmulh_n_s8_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_b0_s8_z_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z1\.b
+**	smulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_s8_z_tied1, svint8_t, int8_t,
+		 z1 = svmulh_n_s8_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_b0_s8_z_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, \1
+**	smulh	z1\.b, p0/m, z1\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_s8_z_untied, svint8_t, int8_t,
+		 z1 = svmulh_n_s8_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_s8_x_tied1:
+**	smulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_x_tied1, svint8_t,
+		z0 = svmulh_s8_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s8_x_tied2:
+**	smulh	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_x_tied2, svint8_t,
+		z1 = svmulh_s8_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_s8_x_untied:
+**	movprfx	z2, z0
+**	smulh	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_s8_x_untied, svint8_t,
+		z2 = svmulh_s8_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_w0_s8_x_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s8_x_tied1, svint8_t, int8_t,
+		 z0 = svmulh_n_s8_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_w0_s8_x_untied:
+**	mov	z1\.b, w0
+**	smulh	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_s8_x_untied, svint8_t, int8_t,
+		 z1 = svmulh_n_s8_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_b0_s8_x_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	smulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_s8_x_tied1, svint8_t, int8_t,
+		 z1 = svmulh_n_s8_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_b0_s8_x_untied:
+**	mov	z2\.b, b0
+**	smulh	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_s8_x_untied, svint8_t, int8_t,
+		 z2 = svmulh_n_s8_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_s8_x_tied1:
+**	mov	(z[0-9]+\.b), #2
+**	smulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s8_x_tied1, svint8_t,
+		z0 = svmulh_n_s8_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_s8_x_untied:
+**	mov	z0\.b, #2
+**	smulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_s8_x_untied, svint8_t,
+		z0 = svmulh_n_s8_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u16.c
new file mode 100644
index 0000000..12a3717
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u16.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_u16_m_tied1:
+**	umulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_m_tied1, svuint16_t,
+		z0 = svmulh_u16_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_u16_m_tied2, svuint16_t,
+		z1 = svmulh_u16_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_u16_m_untied:
+**	movprfx	z0, z1
+**	umulh	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_m_untied, svuint16_t,
+		z0 = svmulh_u16_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_w0_u16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u16_m_tied1, svuint16_t, uint16_t,
+		 z0 = svmulh_n_u16_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_w0_u16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u16_m_untied, svuint16_t, uint16_t,
+		 z0 = svmulh_n_u16_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_h0_u16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	umulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_u16_m_tied1, svuint16_t, uint16_t,
+		 z1 = svmulh_n_u16_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_h0_u16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	umulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_u16_m_untied, svuint16_t, uint16_t,
+		 z1 = svmulh_n_u16_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_u16_m_tied1:
+**	mov	(z[0-9]+\.h), #2
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u16_m_tied1, svuint16_t,
+		z0 = svmulh_n_u16_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_u16_m_untied:
+**	mov	(z[0-9]+\.h), #2
+**	movprfx	z0, z1
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u16_m_untied, svuint16_t,
+		z0 = svmulh_n_u16_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_u16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_z_tied1, svuint16_t,
+		z0 = svmulh_u16_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	umulh	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_z_tied2, svuint16_t,
+		z1 = svmulh_u16_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umulh	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_z_untied, svuint16_t,
+		z0 = svmulh_u16_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_w0_u16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u16_z_tied1, svuint16_t, uint16_t,
+		 z0 = svmulh_n_u16_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_w0_u16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, \1
+**	umulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u16_z_untied, svuint16_t, uint16_t,
+		 z0 = svmulh_n_u16_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_h0_u16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	umulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_u16_z_tied1, svuint16_t, uint16_t,
+		 z1 = svmulh_n_u16_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_h0_u16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, \1
+**	umulh	z1\.h, p0/m, z1\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_u16_z_untied, svuint16_t, uint16_t,
+		 z1 = svmulh_n_u16_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_u16_x_tied1:
+**	umulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_x_tied1, svuint16_t,
+		z0 = svmulh_u16_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u16_x_tied2:
+**	umulh	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_x_tied2, svuint16_t,
+		z1 = svmulh_u16_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u16_x_untied:
+**	movprfx	z2, z0
+**	umulh	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u16_x_untied, svuint16_t,
+		z2 = svmulh_u16_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_w0_u16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u16_x_tied1, svuint16_t, uint16_t,
+		 z0 = svmulh_n_u16_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_w0_u16_x_untied:
+**	mov	z1\.h, w0
+**	umulh	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u16_x_untied, svuint16_t, uint16_t,
+		 z1 = svmulh_n_u16_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_h0_u16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	umulh	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_u16_x_tied1, svuint16_t, uint16_t,
+		 z1 = svmulh_n_u16_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_h0_u16_x_untied:
+**	mov	z2\.h, h0
+**	umulh	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_h0_u16_x_untied, svuint16_t, uint16_t,
+		 z2 = svmulh_n_u16_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_u16_x_tied1:
+**	mov	(z[0-9]+\.h), #2
+**	umulh	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u16_x_tied1, svuint16_t,
+		z0 = svmulh_n_u16_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_u16_x_untied:
+**	mov	z0\.h, #2
+**	umulh	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u16_x_untied, svuint16_t,
+		z0 = svmulh_n_u16_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u32.c
new file mode 100644
index 0000000..218d31f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u32.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_u32_m_tied1:
+**	umulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_m_tied1, svuint32_t,
+		z0 = svmulh_u32_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_u32_m_tied2, svuint32_t,
+		z1 = svmulh_u32_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_u32_m_untied:
+**	movprfx	z0, z1
+**	umulh	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_m_untied, svuint32_t,
+		z0 = svmulh_u32_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_w0_u32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u32_m_tied1, svuint32_t, uint32_t,
+		 z0 = svmulh_n_u32_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_w0_u32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u32_m_untied, svuint32_t, uint32_t,
+		 z0 = svmulh_n_u32_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_s0_u32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	umulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_u32_m_tied1, svuint32_t, uint32_t,
+		 z1 = svmulh_n_u32_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_s0_u32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	umulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_u32_m_untied, svuint32_t, uint32_t,
+		 z1 = svmulh_n_u32_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_u32_m_tied1:
+**	mov	(z[0-9]+\.s), #2
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u32_m_tied1, svuint32_t,
+		z0 = svmulh_n_u32_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_u32_m_untied:
+**	mov	(z[0-9]+\.s), #2
+**	movprfx	z0, z1
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u32_m_untied, svuint32_t,
+		z0 = svmulh_n_u32_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_u32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_z_tied1, svuint32_t,
+		z0 = svmulh_u32_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	umulh	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_z_tied2, svuint32_t,
+		z1 = svmulh_u32_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umulh	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_z_untied, svuint32_t,
+		z0 = svmulh_u32_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_w0_u32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u32_z_tied1, svuint32_t, uint32_t,
+		 z0 = svmulh_n_u32_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_w0_u32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, \1
+**	umulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u32_z_untied, svuint32_t, uint32_t,
+		 z0 = svmulh_n_u32_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_s0_u32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	umulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_u32_z_tied1, svuint32_t, uint32_t,
+		 z1 = svmulh_n_u32_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_s0_u32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, \1
+**	umulh	z1\.s, p0/m, z1\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_u32_z_untied, svuint32_t, uint32_t,
+		 z1 = svmulh_n_u32_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_u32_x_tied1:
+**	umulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_x_tied1, svuint32_t,
+		z0 = svmulh_u32_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u32_x_tied2:
+**	umulh	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_x_tied2, svuint32_t,
+		z1 = svmulh_u32_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u32_x_untied:
+**	movprfx	z2, z0
+**	umulh	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u32_x_untied, svuint32_t,
+		z2 = svmulh_u32_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_w0_u32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u32_x_tied1, svuint32_t, uint32_t,
+		 z0 = svmulh_n_u32_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_w0_u32_x_untied:
+**	mov	z1\.s, w0
+**	umulh	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u32_x_untied, svuint32_t, uint32_t,
+		 z1 = svmulh_n_u32_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_s0_u32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	umulh	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_u32_x_tied1, svuint32_t, uint32_t,
+		 z1 = svmulh_n_u32_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_s0_u32_x_untied:
+**	mov	z2\.s, s0
+**	umulh	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_s0_u32_x_untied, svuint32_t, uint32_t,
+		 z2 = svmulh_n_u32_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_u32_x_tied1:
+**	mov	(z[0-9]+\.s), #2
+**	umulh	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u32_x_tied1, svuint32_t,
+		z0 = svmulh_n_u32_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_u32_x_untied:
+**	mov	z0\.s, #2
+**	umulh	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u32_x_untied, svuint32_t,
+		z0 = svmulh_n_u32_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u64.c
new file mode 100644
index 0000000..4fb83fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u64.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_u64_m_tied1:
+**	umulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_m_tied1, svuint64_t,
+		z0 = svmulh_u64_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_u64_m_tied2, svuint64_t,
+		z1 = svmulh_u64_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_u64_m_untied:
+**	movprfx	z0, z1
+**	umulh	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_m_untied, svuint64_t,
+		z0 = svmulh_u64_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_x0_u64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_u64_m_tied1, svuint64_t, uint64_t,
+		 z0 = svmulh_n_u64_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_x0_u64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_u64_m_untied, svuint64_t, uint64_t,
+		 z0 = svmulh_n_u64_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_d0_u64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	umulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_u64_m_tied1, svuint64_t, uint64_t,
+		 z1 = svmulh_n_u64_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_d0_u64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	umulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_u64_m_untied, svuint64_t, uint64_t,
+		 z1 = svmulh_n_u64_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_u64_m_tied1:
+**	mov	(z[0-9]+\.d), #2
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u64_m_tied1, svuint64_t,
+		z0 = svmulh_n_u64_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_u64_m_untied:
+**	mov	(z[0-9]+\.d), #2
+**	movprfx	z0, z1
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u64_m_untied, svuint64_t,
+		z0 = svmulh_n_u64_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_u64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_z_tied1, svuint64_t,
+		z0 = svmulh_u64_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	umulh	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_z_tied2, svuint64_t,
+		z1 = svmulh_u64_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umulh	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_z_untied, svuint64_t,
+		z0 = svmulh_u64_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_x0_u64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_u64_z_tied1, svuint64_t, uint64_t,
+		 z0 = svmulh_n_u64_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_x0_u64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, \1
+**	umulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_u64_z_untied, svuint64_t, uint64_t,
+		 z0 = svmulh_n_u64_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_d0_u64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	umulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_u64_z_tied1, svuint64_t, uint64_t,
+		 z1 = svmulh_n_u64_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_d0_u64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, \1
+**	umulh	z1\.d, p0/m, z1\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_u64_z_untied, svuint64_t, uint64_t,
+		 z1 = svmulh_n_u64_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_u64_x_tied1:
+**	umulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_x_tied1, svuint64_t,
+		z0 = svmulh_u64_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u64_x_tied2:
+**	umulh	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_x_tied2, svuint64_t,
+		z1 = svmulh_u64_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u64_x_untied:
+**	movprfx	z2, z0
+**	umulh	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u64_x_untied, svuint64_t,
+		z2 = svmulh_u64_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_x0_u64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_u64_x_tied1, svuint64_t, uint64_t,
+		 z0 = svmulh_n_u64_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_x0_u64_x_untied:
+**	mov	z1\.d, x0
+**	umulh	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_x0_u64_x_untied, svuint64_t, uint64_t,
+		 z1 = svmulh_n_u64_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_d0_u64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	umulh	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_u64_x_tied1, svuint64_t, uint64_t,
+		 z1 = svmulh_n_u64_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_d0_u64_x_untied:
+**	mov	z2\.d, d0
+**	umulh	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_d0_u64_x_untied, svuint64_t, uint64_t,
+		 z2 = svmulh_n_u64_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_u64_x_tied1:
+**	mov	(z[0-9]+\.d), #2
+**	umulh	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u64_x_tied1, svuint64_t,
+		z0 = svmulh_n_u64_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_u64_x_untied:
+**	mov	z0\.d, #2
+**	umulh	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u64_x_untied, svuint64_t,
+		z0 = svmulh_n_u64_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u8.c
new file mode 100644
index 0000000..2ce8891
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/mulh_u8.c
@@ -0,0 +1,254 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** mulh_u8_m_tied1:
+**	umulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_m_tied1, svuint8_t,
+		z0 = svmulh_u8_m (p0, z0, z1),
+		z0 = svmulh_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (mulh_u8_m_tied2, svuint8_t,
+		z1 = svmulh_u8_m (p0, z0, z1),
+		z1 = svmulh_m (p0, z0, z1))
+
+/*
+** mulh_u8_m_untied:
+**	movprfx	z0, z1
+**	umulh	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_m_untied, svuint8_t,
+		z0 = svmulh_u8_m (p0, z1, z2),
+		z0 = svmulh_m (p0, z1, z2))
+
+/*
+** mulh_w0_u8_m_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u8_m_tied1, svuint8_t, uint8_t,
+		 z0 = svmulh_n_u8_m (p0, z0, x0),
+		 z0 = svmulh_m (p0, z0, x0))
+
+/*
+** mulh_w0_u8_m_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0, z1
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u8_m_untied, svuint8_t, uint8_t,
+		 z0 = svmulh_n_u8_m (p0, z1, x0),
+		 z0 = svmulh_m (p0, z1, x0))
+
+/*
+** mulh_b0_u8_m_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	umulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_u8_m_tied1, svuint8_t, uint8_t,
+		 z1 = svmulh_n_u8_m (p0, z1, d0),
+		 z1 = svmulh_m (p0, z1, d0))
+
+/*
+** mulh_b0_u8_m_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1, z2
+**	umulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_u8_m_untied, svuint8_t, uint8_t,
+		 z1 = svmulh_n_u8_m (p0, z2, d0),
+		 z1 = svmulh_m (p0, z2, d0))
+
+/*
+** mulh_2_u8_m_tied1:
+**	mov	(z[0-9]+\.b), #2
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u8_m_tied1, svuint8_t,
+		z0 = svmulh_n_u8_m (p0, z0, 2),
+		z0 = svmulh_m (p0, z0, 2))
+
+/*
+** mulh_2_u8_m_untied:
+**	mov	(z[0-9]+\.b), #2
+**	movprfx	z0, z1
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u8_m_untied, svuint8_t,
+		z0 = svmulh_n_u8_m (p0, z1, 2),
+		z0 = svmulh_m (p0, z1, 2))
+
+/*
+** mulh_u8_z_tied1:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_z_tied1, svuint8_t,
+		z0 = svmulh_u8_z (p0, z0, z1),
+		z0 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u8_z_tied2:
+**	movprfx	z1\.b, p0/z, z1\.b
+**	umulh	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_z_tied2, svuint8_t,
+		z1 = svmulh_u8_z (p0, z0, z1),
+		z1 = svmulh_z (p0, z0, z1))
+
+/*
+** mulh_u8_z_untied:
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umulh	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_z_untied, svuint8_t,
+		z0 = svmulh_u8_z (p0, z1, z2),
+		z0 = svmulh_z (p0, z1, z2))
+
+/*
+** mulh_w0_u8_z_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u8_z_tied1, svuint8_t, uint8_t,
+		 z0 = svmulh_n_u8_z (p0, z0, x0),
+		 z0 = svmulh_z (p0, z0, x0))
+
+/*
+** mulh_w0_u8_z_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, \1
+**	umulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u8_z_untied, svuint8_t, uint8_t,
+		 z0 = svmulh_n_u8_z (p0, z1, x0),
+		 z0 = svmulh_z (p0, z1, x0))
+
+/*
+** mulh_b0_u8_z_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z1\.b
+**	umulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_u8_z_tied1, svuint8_t, uint8_t,
+		 z1 = svmulh_n_u8_z (p0, z1, d0),
+		 z1 = svmulh_z (p0, z1, d0))
+
+/*
+** mulh_b0_u8_z_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, \1
+**	umulh	z1\.b, p0/m, z1\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_u8_z_untied, svuint8_t, uint8_t,
+		 z1 = svmulh_n_u8_z (p0, z2, d0),
+		 z1 = svmulh_z (p0, z2, d0))
+
+/*
+** mulh_u8_x_tied1:
+**	umulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_x_tied1, svuint8_t,
+		z0 = svmulh_u8_x (p0, z0, z1),
+		z0 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u8_x_tied2:
+**	umulh	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_x_tied2, svuint8_t,
+		z1 = svmulh_u8_x (p0, z0, z1),
+		z1 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_u8_x_untied:
+**	movprfx	z2, z0
+**	umulh	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_u8_x_untied, svuint8_t,
+		z2 = svmulh_u8_x (p0, z0, z1),
+		z2 = svmulh_x (p0, z0, z1))
+
+/*
+** mulh_w0_u8_x_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u8_x_tied1, svuint8_t, uint8_t,
+		 z0 = svmulh_n_u8_x (p0, z0, x0),
+		 z0 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_w0_u8_x_untied:
+**	mov	z1\.b, w0
+**	umulh	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_w0_u8_x_untied, svuint8_t, uint8_t,
+		 z1 = svmulh_n_u8_x (p0, z0, x0),
+		 z1 = svmulh_x (p0, z0, x0))
+
+/*
+** mulh_b0_u8_x_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	umulh	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_u8_x_tied1, svuint8_t, uint8_t,
+		 z1 = svmulh_n_u8_x (p0, z1, d0),
+		 z1 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_b0_u8_x_untied:
+**	mov	z2\.b, b0
+**	umulh	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (mulh_b0_u8_x_untied, svuint8_t, uint8_t,
+		 z2 = svmulh_n_u8_x (p0, z1, d0),
+		 z2 = svmulh_x (p0, z1, d0))
+
+/*
+** mulh_2_u8_x_tied1:
+**	mov	(z[0-9]+\.b), #2
+**	umulh	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u8_x_tied1, svuint8_t,
+		z0 = svmulh_n_u8_x (p0, z0, 2),
+		z0 = svmulh_x (p0, z0, 2))
+
+/*
+** mulh_2_u8_x_untied:
+**	mov	z0\.b, #2
+**	umulh	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (mulh_2_u8_x_untied, svuint8_t,
+		z0 = svmulh_n_u8_x (p0, z1, 2),
+		z0 = svmulh_x (p0, z1, 2))
+
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 4a04e9e..c4dd4f0 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -1765,9 +1765,9 @@ struct GTY(()) tree_function_decl {
 
   /* In a FUNCTION_DECL for which DECL_BUILT_IN holds, this is
      DECL_FUNCTION_CODE.  Otherwise unused.
-     ???  The bitfield needs to be able to hold all target function
-	  codes as well.  */
-  ENUM_BITFIELD(built_in_function) function_code : 12;
+     ???  Size should be able to hold all target function codes.  */
+  ENUM_BITFIELD(built_in_function) function_code : 32;
+
   ENUM_BITFIELD(built_in_class) built_in_class : 2;
 
   unsigned static_ctor_flag : 1;
@@ -1790,7 +1790,7 @@ struct GTY(()) tree_function_decl {
   unsigned has_debug_args_flag : 1;
   unsigned tm_clone_flag : 1;
   unsigned versioned_function : 1;
-  /* No bits left.  */
+  /* 12 bits left for future expansion.  */
 };
 
 struct GTY(()) tree_translation_unit_decl {
-- 
2.7.4
