kmclaughlin created this revision.
kmclaughlin added reviewers: sdesmalen, c-rhodes, dancgr, efriedma, cameron.mcinally.
Herald added subscribers: danielkiss, psnobl, rkruppe, hiraditya, kristof.beyls, tschuett.
Herald added a reviewer: rengolin.
Herald added a project: LLVM.

Adds the following intrinsics for saturating add & subtract:

- @llvm.aarch64.sve.[s|u]qadd.x
- @llvm.aarch64.sve.[s|u]qsub.x

The SQADD/UQADD/SQSUB/UQSUB vector and immediate patterns are changed to
select these intrinsics instead of the generic saturating add/subtract ISD
nodes (saddsat, uaddsat, ssubsat, usubsat), and the existing tests are
updated to match.

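For reference, a minimal IR usage sketch of the new unpredicated form,
mirroring the added tests in sve-intrinsics-int-arith.ll (the function name
is illustrative):

  define <vscale x 4 x i32> @example_sqadd(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
    ; Unpredicated saturating add across the scalable 32-bit lanes;
    ; expected to select "sqadd z0.s, z0.s, z1.s".
    %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
                                                                     <vscale x 4 x i32> %b)
    ret <vscale x 4 x i32> %out
  }

  declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)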

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D77054

Files:
  llvm/include/llvm/IR/IntrinsicsAArch64.td
  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
  llvm/test/CodeGen/AArch64/sve-int-arith.ll
  llvm/test/CodeGen/AArch64/sve-int-imm.ll
  llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll

Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith.ll
@@ -134,6 +134,82 @@
   ret <vscale x 2 x i64> %out
 }
 
+; SQADD
+
+define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqadd_i8:
+; CHECK: sqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqadd_i16:
+; CHECK: sqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqadd_i32:
+; CHECK: sqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqadd_i64:
+; CHECK: sqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sqsub_i8:
+; CHECK: sqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sqsub_i16:
+; CHECK: sqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sqsub_i32:
+; CHECK: sqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sqsub_i64:
+; CHECK: sqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 ; UDOT
 
 define <vscale x 4 x i32> @udot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
@@ -169,6 +245,82 @@
   ret <vscale x 4 x i32> %out
 }
 
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqadd_i8:
+; CHECK: uqadd z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqadd_i16:
+; CHECK: uqadd z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqadd_i32:
+; CHECK: uqadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqadd_i64:
+; CHECK: uqadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uqsub_i8:
+; CHECK: uqsub z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uqsub_i16:
+; CHECK: uqsub z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uqsub_i32:
+; CHECK: uqsub z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uqsub_i64:
+; CHECK: uqsub z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
 declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
 declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
@@ -185,8 +337,28 @@
 declare <vscale x 4 x i32> @llvm.aarch64.sve.sdot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.sdot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
 
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 
 declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
Index: llvm/test/CodeGen/AArch64/sve-int-imm.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-int-imm.ll
+++ llvm/test/CodeGen/AArch64/sve-int-imm.ll
@@ -218,302 +218,338 @@
 }
 
 ; SQADD
-define <vscale x 16 x i8> @sqadd_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqadd_i8_low
-; CHECK: sqadd  z0.b, z0.b, #30
+
+define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqadd_b_lowimm:
+; CHECK: sqadd z0.b, z0.b, #27
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
   %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
-  ret <vscale x 16 x i8> %res
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @sqadd_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_i16_low
-; CHECK: sqadd  z0.h, z0.h, #30
+define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_lowimm:
+; CHECK: sqadd z0.h, z0.h, #43
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 8 x i16> @sqadd_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqadd_i16_high
-; CHECK: sqadd  z0.h, z0.h, #1024
+define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqadd_h_highimm:
+; CHECK: sqadd z0.h, z0.h, #2048
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @sqadd_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_i32_low
-; CHECK: sqadd  z0.s, z0.s, #30
+define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_lowimm:
+; CHECK: sqadd z0.s, z0.s, #1
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
   %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 4 x i32> @sqadd_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqadd_i32_high
-; CHECK: sqadd  z0.s, z0.s, #1024
+define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_s_highimm:
+; CHECK: sqadd z0.s, z0.s, #8192
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
   %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @sqadd_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_i64_low
-; CHECK: sqadd  z0.d, z0.d, #30
+define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_lowimm:
+; CHECK: sqadd z0.d, z0.d, #255
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
   %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 2 x i64> @sqadd_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqadd_i64_high
-; CHECK: sqadd  z0.d, z0.d, #1024
+define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqadd_d_highimm:
+; CHECK: sqadd z0.d, z0.d, #65280
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
   %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-; UQADD
-define <vscale x 16 x i8> @uqadd_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqadd_i8_low
-; CHECK: uqadd  z0.b, z0.b, #30
+; SQSUB
+
+define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sqsub_b_lowimm:
+; CHECK: sqsub z0.b, z0.b, #27
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
   %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
-  ret <vscale x 16 x i8> %res
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @uqadd_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_i16_low
-; CHECK: uqadd  z0.h, z0.h, #30
+define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_lowimm:
+; CHECK: sqsub z0.h, z0.h, #43
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 8 x i16> @uqadd_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqadd_i16_high
-; CHECK: uqadd  z0.h, z0.h, #1024
+define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: sqsub_h_highimm:
+; CHECK: sqsub z0.h, z0.h, #2048
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @uqadd_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_i32_low
-; CHECK: uqadd  z0.s, z0.s, #30
+define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_lowimm:
+; CHECK: sqsub z0.s, z0.s, #1
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
   %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 4 x i32> @uqadd_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqadd_i32_high
-; CHECK: uqadd  z0.s, z0.s, #1024
+define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqsub_s_highimm:
+; CHECK: sqsub z0.s, z0.s, #8192
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
   %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @uqadd_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_i64_low
-; CHECK: uqadd  z0.d, z0.d, #30
+define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_lowimm:
+; CHECK: sqsub z0.d, z0.d, #255
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
   %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 2 x i64> @uqadd_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqadd_i64_high
-; CHECK: uqadd  z0.d, z0.d, #1024
+define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_d_highimm:
+; CHECK: sqsub z0.d, z0.d, #65280
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
   %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-; SQSUB
-define <vscale x 16 x i8> @sqsub_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: sqsub_i8_low
-; CHECK: sqsub  z0.b, z0.b, #30
+; UQADD
+
+define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqadd_b_lowimm:
+; CHECK: uqadd z0.b, z0.b, #27
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
   %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
-  ret <vscale x 16 x i8> %res
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 8 x i16> @sqsub_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_i16_low
-; CHECK: sqsub  z0.h, z0.h, #30
+define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_lowimm:
+; CHECK: uqadd z0.h, z0.h, #43
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 8 x i16> @sqsub_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: sqsub_i16_high
-; CHECK: sqsub  z0.h, z0.h, #1024
+define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqadd_h_highimm:
+; CHECK: uqadd z0.h, z0.h, #2048
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 4 x i32> @sqsub_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_i32_low
-; CHECK: sqsub  z0.s, z0.s, #30
+define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_lowimm:
+; CHECK: uqadd z0.s, z0.s, #1
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
   %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 4 x i32> @sqsub_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: sqsub_i32_high
-; CHECK: sqsub  z0.s, z0.s, #1024
+; UQSUB
+
+define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: uqsub_b_lowimm:
+; CHECK: uqsub z0.b, z0.b, #27
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
+                                                                   <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
 }
 
-define <vscale x 2 x i64> @sqsub_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_i64_low
-; CHECK: sqsub  z0.d, z0.d, #30
+define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_lowimm:
+; CHECK: uqsub z0.h, z0.h, #43
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %elt = insertelement <vscale x 8 x i16> undef, i16 43, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-define <vscale x 2 x i64> @sqsub_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: sqsub_i64_high
-; CHECK: sqsub  z0.d, z0.d, #1024
+define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: uqsub_h_highimm:
+; CHECK: uqsub z0.h, z0.h, #2048
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %elt = insertelement <vscale x 8 x i16> undef, i16 2048, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
+                                                                   <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
 }
 
-; UQSUB
-define <vscale x 16 x i8> @uqsub_i8_low(<vscale x 16 x i8> %a) {
-; CHECK-LABEL: uqsub_i8_low
-; CHECK: uqsub  z0.b, z0.b, #30
+define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_lowimm:
+; CHECK: uqsub z0.s, z0.s, #1
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 16 x i8> undef, i8 30, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
-  %res =  call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
-  ret <vscale x 16 x i8> %res
+  %elt = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 8 x i16> @uqsub_i16_low(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_i16_low
-; CHECK: uqsub  z0.h, z0.h, #30
+define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqsub_s_highimm:
+; CHECK: uqsub z0.s, z0.s, #8192
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 30, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 8 x i16> @uqsub_i16_high(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: uqsub_i16_high
-; CHECK: uqsub  z0.h, z0.h, #1024
+define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_lowimm:
+; CHECK: uqsub z0.d, z0.d, #255
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 8 x i16> undef, i16 1024, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
-  %res =  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %splat)
-  ret <vscale x 8 x i16> %res
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 4 x i32> @uqsub_i32_low(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_i32_low
-; CHECK: uqsub  z0.s, z0.s, #30
+define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqsub_d_highimm:
+; CHECK: uqsub z0.d, z0.d, #65280
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 30, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 4 x i32> @uqsub_i32_high(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: uqsub_i32_high
-; CHECK: uqsub  z0.s, z0.s, #1024
+
+define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: uqadd_s_highimm:
+; CHECK: uqadd z0.s, z0.s, #8192
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 4 x i32> undef, i32 1024, i32 0
+  %elt = insertelement <vscale x 4 x i32> undef, i32 8192, i32 0
   %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
-  %res =  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %splat)
-  ret <vscale x 4 x i32> %res
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
+                                                                   <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
 }
 
-define <vscale x 2 x i64> @uqsub_i64_low(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_i64_low
-; CHECK: uqsub  z0.d, z0.d, #30
+define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_lowimm:
+; CHECK: uqadd z0.d, z0.d, #255
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 30, i32 0
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i32 0
   %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
 }
 
-define <vscale x 2 x i64> @uqsub_i64_high(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: uqsub_i64_high
-; CHECK: uqsub  z0.d, z0.d, #1024
+define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: uqadd_d_highimm:
+; CHECK: uqadd z0.d, z0.d, #65280
 ; CHECK-NEXT: ret
-  %elt = insertelement <vscale x 2 x i64> undef, i64 1024, i32 0
+  %elt = insertelement <vscale x 2 x i64> undef, i64 65280, i32 0
   %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  %res =  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %splat)
-  ret <vscale x 2 x i64> %res
-}
-
-declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
+                                                                   <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
Index: llvm/test/CodeGen/AArch64/sve-int-arith.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-int-arith.ll
+++ llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -64,136 +64,6 @@
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: sqadd_i64
-; CHECK: sqadd  z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>  %a, <vscale x 2 x i64> %b)
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: sqadd_i32
-; CHECK: sqadd  z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>  %a, <vscale x 4 x i32> %b)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: sqadd_i16
-; CHECK: sqadd  z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>  %a, <vscale x 8 x i16> %b)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: sqadd_i8
-; CHECK: sqadd  z0.b, z0.b, z1.b
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>  %a, <vscale x 16 x i8> %b)
-  ret <vscale x 16 x i8> %res
-}
-
-
-define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: sqsub_i64
-; CHECK: sqsub  z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>  %a, <vscale x 2 x i64> %b)
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: sqsub_i32
-; CHECK: sqsub  z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>  %a, <vscale x 4 x i32> %b)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: sqsub_i16
-; CHECK: sqsub  z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>  %a, <vscale x 8 x i16> %b)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: sqsub_i8
-; CHECK: sqsub  z0.b, z0.b, z1.b
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>  %a, <vscale x 16 x i8> %b)
-  ret <vscale x 16 x i8> %res
-}
-
-
-define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: uqadd_i64
-; CHECK: uqadd  z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>  %a, <vscale x 2 x i64> %b)
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: uqadd_i32
-; CHECK: uqadd  z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>  %a, <vscale x 4 x i32> %b)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: uqadd_i16
-; CHECK: uqadd  z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>  %a, <vscale x 8 x i16> %b)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: uqadd_i8
-; CHECK: uqadd  z0.b, z0.b, z1.b
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>  %a, <vscale x 16 x i8> %b)
-  ret <vscale x 16 x i8> %res
-}
-
-
-define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: uqsub_i64
-; CHECK: uqsub  z0.d, z0.d, z1.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>  %a, <vscale x 2 x i64> %b)
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: uqsub_i32
-; CHECK: uqsub  z0.s, z0.s, z1.s
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>  %a, <vscale x 4 x i32> %b)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: uqsub_i16
-; CHECK: uqsub  z0.h, z0.h, z1.h
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>  %a, <vscale x 8 x i16> %b)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: uqsub_i8
-; CHECK: uqsub  z0.b, z0.b, z1.b
-; CHECK-NEXT: ret
-  %res = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>  %a, <vscale x 16 x i8> %b)
-  ret <vscale x 16 x i8> %res
-}
 
 declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -148,10 +148,10 @@
 
   defm ADD_ZZZ   : sve_int_bin_cons_arit_0<0b000, "add", add>;
   defm SUB_ZZZ   : sve_int_bin_cons_arit_0<0b001, "sub", sub>;
-  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZZZ : sve_int_bin_cons_arit_0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZZZ : sve_int_bin_cons_arit_0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZZZ : sve_int_bin_cons_arit_0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZZZ : sve_int_bin_cons_arit_0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;
 
   defm AND_ZZZ : sve_int_bin_cons_log<0b00, "and", and>;
   defm ORR_ZZZ : sve_int_bin_cons_log<0b01, "orr", or>;
@@ -170,10 +170,10 @@
   defm ADD_ZI   : sve_int_arith_imm0<0b000, "add", add>;
   defm SUB_ZI   : sve_int_arith_imm0<0b001, "sub", sub>;
   defm SUBR_ZI  : sve_int_arith_imm0_subr<0b011, "subr", sub>;
-  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", saddsat>;
-  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", uaddsat>;
-  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", ssubsat>;
-  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", usubsat>;
+  defm SQADD_ZI : sve_int_arith_imm0<0b100, "sqadd", int_aarch64_sve_sqadd_x>;
+  defm UQADD_ZI : sve_int_arith_imm0<0b101, "uqadd", int_aarch64_sve_uqadd_x>;
+  defm SQSUB_ZI : sve_int_arith_imm0<0b110, "sqsub", int_aarch64_sve_sqsub_x>;
+  defm UQSUB_ZI : sve_int_arith_imm0<0b111, "uqsub", int_aarch64_sve_uqsub_x>;
 
   defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
   defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -184,10 +184,6 @@
     addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);
 
     for (auto VT : { MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64 }) {
-      setOperationAction(ISD::SADDSAT, VT, Legal);
-      setOperationAction(ISD::UADDSAT, VT, Legal);
-      setOperationAction(ISD::SSUBSAT, VT, Legal);
-      setOperationAction(ISD::USUBSAT, VT, Legal);
       setOperationAction(ISD::SMAX, VT, Legal);
       setOperationAction(ISD::UMAX, VT, Legal);
       setOperationAction(ISD::SMIN, VT, Legal);
Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1426,6 +1426,11 @@
 def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
 def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
 
+def int_aarch64_sve_sqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_sqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
+
 // Shifts
 
 def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;