kmclaughlin updated this revision to Diff 254558.
kmclaughlin added a comment.

Added patterns to AArch64SVEInstrInfo.td to support llvm.[s|u]add.sat &
llvm.[s|u]sub.sat again; these patterns were removed by my previous patch.
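
For context, a minimal IR sketch of what the restored patterns are meant to keep
selecting (hypothetical function name, not part of this patch; it assumes the
generic llvm.sadd.sat intrinsic on scalable vectors):

  define <vscale x 16 x i8> @generic_sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
    ; Expected codegen with this patch applied: sqadd z0.b, z0.b, z1.b
    %out = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
    ret <vscale x 16 x i8> %out
  }
  declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)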


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D77054/new/

https://reviews.llvm.org/D77054

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll

Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -134,6 +134,82 @@
   ret <vscale x 2 x i64> %out
 }
 
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqadd_i8:
+; CHECK: sqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqadd_i16:
+; CHECK: sqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqadd_i32:
+; CHECK: sqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqadd_i64:
+; CHECK: sqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqsub_i8:
+; CHECK: sqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqsub_i16:
+; CHECK: sqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqsub_i32:
+; CHECK: sqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqsub_i64:
+; CHECK: sqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 ; UDOT
 
 define <vscale x 4 x i32> @udot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
@@ -169,6 +245,82 @@
   ret <vscale x 4 x i32> %out
 }
 
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqadd_i8:
+; CHECK: uqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqadd_i16:
+; CHECK: uqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqadd_i32:
+; CHECK: uqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqadd_i64:
+; CHECK: uqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqsub_i8:
+; CHECK: uqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqsub_i16:
+; CHECK: uqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqsub_i32:
+; CHECK: uqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqsub_i64:
+; CHECK: uqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
 declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
@@ -185,8 +337,28 @@
 declare <vscale x 4 x i32> @llvm.aarch64.sve.sdot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.sdot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
 
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
@@ -0,0 +1,337 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_b_lowimm:
+; CHECK: sqadd z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_lowimm:
+; CHECK: sqadd z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_highimm:
+; CHECK: sqadd z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_lowimm:
+; CHECK: sqadd z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_highimm:
+; CHECK: sqadd z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_lowimm:
+; CHECK: sqadd z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_highimm:
+; CHECK: sqadd z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_b_lowimm:
+; CHECK: sqsub z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_lowimm:
+; CHECK: sqsub z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_highimm:
+; CHECK: sqsub z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_lowimm:
+; CHECK: sqsub z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_highimm:
+; CHECK: sqsub z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_lowimm:
+; CHECK: sqsub z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_highimm:
+; CHECK: sqsub z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_b_lowimm:
+; CHECK: uqadd z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_lowimm:
+; CHECK: uqadd z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_highimm:
+; CHECK: uqadd z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_lowimm:
+; CHECK: uqadd z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_highimm:
+; CHECK: uqadd z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_lowimm:
+; CHECK: uqadd z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_highimm:
+; CHECK: uqadd z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_b_lowimm:
+; CHECK: uqsub z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_lowimm:
+; CHECK: uqsub z0.h, z0.h, #43
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_highimm:
+; CHECK: uqsub z0.h, z0.h, #2048
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_lowimm:
+; CHECK: uqsub z0.s, z0.s, #1
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_highimm:
+; CHECK: uqsub z0.s, z0.s, #8192
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_lowimm:
+; CHECK: uqsub z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_highimm:
+; CHECK: uqsub z0.d, z0.d, #65280
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -148,10 +148,30 @@
 
   defm ADD_ZZZ   : sve_int_bin_cons_arit_0<0b000, "add", add>;
   defm SUB_ZZZ   : sve_int_bin_cons_arit_0<0b001, "sub", sub>;
-  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;
+
+  def : Pat<(nxv16i8 (saddsat nxv16i8:$Op1, nxv16i8:$Op2)), (SQADD_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (saddsat nxv8i16:$Op1, nxv8i16:$Op2)), (SQADD_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (saddsat nxv4i32:$Op1, nxv4i32:$Op2)), (SQADD_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (saddsat nxv2i64:$Op1, nxv2i64:$Op2)), (SQADD_ZZZ_D $Op1, $Op2)>;
+
+  def : Pat<(nxv16i8 (uaddsat nxv16i8:$Op1, nxv16i8:$Op2)), (UQADD_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (uaddsat nxv8i16:$Op1, nxv8i16:$Op2)), (UQADD_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (uaddsat nxv4i32:$Op1, nxv4i32:$Op2)), (UQADD_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (uaddsat nxv2i64:$Op1, nxv2i64:$Op2)), (UQADD_ZZZ_D $Op1, $Op2)>;
+
+  def : Pat<(nxv16i8 (ssubsat nxv16i8:$Op1, nxv16i8:$Op2)), (SQSUB_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (ssubsat nxv8i16:$Op1, nxv8i16:$Op2)), (SQSUB_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (ssubsat nxv4i32:$Op1, nxv4i32:$Op2)), (SQSUB_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (ssubsat nxv2i64:$Op1, nxv2i64:$Op2)), (SQSUB_ZZZ_D $Op1, $Op2)>;
+
+  def : Pat<(nxv16i8 (usubsat nxv16i8:$Op1, nxv16i8:$Op2)), (UQSUB_ZZZ_B $Op1, $Op2)>;
+  def : Pat<(nxv8i16 (usubsat nxv8i16:$Op1, nxv8i16:$Op2)), (UQSUB_ZZZ_H $Op1, $Op2)>;
+  def : Pat<(nxv4i32 (usubsat nxv4i32:$Op1, nxv4i32:$Op2)), (UQSUB_ZZZ_S $Op1, $Op2)>;
+  def : Pat<(nxv2i64 (usubsat nxv2i64:$Op1, nxv2i64:$Op2)), (UQSUB_ZZZ_D $Op1, $Op2)>;
 
   defm AND_ZZZ : sve_int_bin_cons_log<0b00, "and", and>;
   defm ORR_ZZZ : sve_int_bin_cons_log<0b01, "orr", or>;
@@ -170,10 +190,36 @@
   defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
   defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
   defm SUBR_ZI  : sve_int_arith_imm0_subr<0b011, "subr", sub>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;
+
+  multiclass qaddsub_imm<ValueType Ty, ZPRRegOp zprty, ValueType ImmTy,
+                         ComplexPattern cpx, Instruction Op, Instruction Inst> {
+    def _default : Pat<(Ty (Op (Ty zprty:$Op1), (Ty (AArch64dup (ImmTy (cpx i32:$imm, i32:$shift)))))),
+                       (Inst $Op1, i32:$imm, i32:$shift)>;
+  }
+
+  defm Pat_SQADD_ZI_B : qaddsub_imm<nxv16i8, ZPR8,  i32, SVEAddSubImm8Pat,  saddsat, SQADD_ZI_B>;
+  defm Pat_SQADD_ZI_H : qaddsub_imm<nxv8i16, ZPR16, i32, SVEAddSubImm16Pat, saddsat, SQADD_ZI_H>;
+  defm Pat_SQADD_ZI_S : qaddsub_imm<nxv4i32, ZPR32, i32, SVEAddSubImm32Pat, saddsat, SQADD_ZI_S>;
+  defm Pat_SQADD_ZI_D : qaddsub_imm<nxv2i64, ZPR64, i64, SVEAddSubImm64Pat, saddsat, SQADD_ZI_D>;
+
+  defm Pat_UQADD_ZI_B : qaddsub_imm<nxv16i8, ZPR8,  i32, SVEAddSubImm8Pat,  uaddsat, UQADD_ZI_B>;
+  defm Pat_UQADD_ZI_H : qaddsub_imm<nxv8i16, ZPR16, i32, SVEAddSubImm16Pat, uaddsat, UQADD_ZI_H>;
+  defm Pat_UQADD_ZI_S : qaddsub_imm<nxv4i32, ZPR32, i32, SVEAddSubImm32Pat, uaddsat, UQADD_ZI_S>;
+  defm Pat_UQADD_ZI_D : qaddsub_imm<nxv2i64, ZPR64, i64, SVEAddSubImm64Pat, uaddsat, UQADD_ZI_D>;
+
+  defm Pat_SQSUB_ZI_B : qaddsub_imm<nxv16i8, ZPR8,  i32, SVEAddSubImm8Pat,  ssubsat, SQSUB_ZI_B>;
+  defm Pat_SQSUB_ZI_H : qaddsub_imm<nxv8i16, ZPR16, i32, SVEAddSubImm16Pat, ssubsat, SQSUB_ZI_H>;
+  defm Pat_SQSUB_ZI_S : qaddsub_imm<nxv4i32, ZPR32, i32, SVEAddSubImm32Pat, ssubsat, SQSUB_ZI_S>;
+  defm Pat_SQSUB_ZI_D : qaddsub_imm<nxv2i64, ZPR64, i64, SVEAddSubImm64Pat, ssubsat, SQSUB_ZI_D>;
+
+  defm Pat_UQSUB_ZI_B : qaddsub_imm<nxv16i8, ZPR8,  i32, SVEAddSubImm8Pat,  usubsat, UQSUB_ZI_B>;
+  defm Pat_UQSUB_ZI_H : qaddsub_imm<nxv8i16, ZPR16, i32, SVEAddSubImm16Pat, usubsat, UQSUB_ZI_H>;
+  defm Pat_UQSUB_ZI_S : qaddsub_imm<nxv4i32, ZPR32, i32, SVEAddSubImm32Pat, usubsat, UQSUB_ZI_S>;
+  defm Pat_UQSUB_ZI_D : qaddsub_imm<nxv2i64, ZPR64, i64, SVEAddSubImm64Pat, usubsat, UQSUB_ZI_D>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1426,6 +1426,11 @@
 def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
 def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
 
+def int_aarch64_sve_sqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
+
 // Shifts
 
 def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;