llvmbot wrote:

<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-risc-v

Author: Pengcheng Wang (wangpc-pp)

<details>
<summary>Changes</summary>

For the abs operation, we can synthesize it via `vabd.vx` with the `x0` register (since `abs(x) = abd(x, 0)`), which is printed as `vabs.v`.
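
For example (adapted from the updated `abs-sdnode.ll` checks below; without Zvabd this lowers to a `vrsub.vi`/`vmax.vv` pair):

```llvm
; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s

define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
; CHECK-LABEL: vabs_nxv1i16:
; CHECK:         vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vabs.v v8, v8
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
  ret <vscale x 1 x i16> %r
}
```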


---

Patch is 69.92 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/180142.diff


9 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+16-11) 
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td (+20) 
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td (+26) 
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td (-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll (+94) 
- (modified) llvm/test/CodeGen/RISCV/rvv/abs-vp.ll (+319) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll (+4-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll (+247) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll (+107) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6e236fcdae82d..32960a1cda8e3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -996,7 +996,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          Legal);
 
       if (Subtarget.hasStdExtZvabd())
-        setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+        setOperationAction({ISD::ABDS, ISD::ABDU, ISD::ABS}, VT, Legal);
       else
         setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
 
@@ -13702,17 +13702,22 @@ SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
   } else
     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
 
-  SDValue SplatZero = DAG.getNode(
-      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
-      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
-  SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
-                             DAG.getUNDEF(ContainerVT), Mask, VL);
-  SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
-                            DAG.getUNDEF(ContainerVT), Mask, VL);
-
+  SDValue Result;
+  if (Subtarget.hasStdExtZvabd()) {
+    Result = DAG.getNode(RISCVISD::ABS_VL, DL, ContainerVT, X,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  } else {
+    SDValue SplatZero = DAG.getNode(
+        RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+        DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
+    SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
+                               DAG.getUNDEF(ContainerVT), Mask, VL);
+    Result = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  }
   if (VT.isFixedLengthVector())
-    Max = convertFromScalableVector(VT, Max, DAG, Subtarget);
-  return Max;
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+  return Result;
 }
 
 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 46b1cefcf6dc0..46dd45876a384 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1955,6 +1955,26 @@ multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
   }
 }
 
+multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
+                         Predicate predicate = HasStdExtZvbb> {
+  foreach vti = AllIntegerVectors in {
+    let Predicates = !listconcat([predicate],
+                                 GetVTypePredicates<vti>.Predicates) in {
+      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
+                                (vti.Vector vti.RegClass:$passthru),
+                                (vti.Mask VMV0:$vm),
+                                VLOpFrag)),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
+                   vti.RegClass:$passthru,
+                   vti.RegClass:$rs1,
+                   (vti.Mask VMV0:$vm),
+                   GPR:$vl,
+                   vti.Log2SEW,
+                   TAIL_AGNOSTIC)>;
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index c73d0f20a7e8b..14e37b8e269a2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -29,7 +29,23 @@ let Predicates = [HasStdExtZvabd] in {
 //===----------------------------------------------------------------------===//
 // Pseudos
 //===----------------------------------------------------------------------===//
+
+multiclass PseudoVABS {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    let VLMul = m.value in {
+      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+                       SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+      def "_V_" # mx # "_MASK" :
+        VPseudoUnaryMask<m.vrclass, m.vrclass>,
+        RISCVMaskedPseudo<MaskIdx=2>,
+        SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+    }
+  }
+}
+
 let Predicates = [HasStdExtZvabd] in {
+  defm PseudoVABS : PseudoVABS;
   defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
   defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
 } // Predicates = [HasStdExtZvabd]
@@ -38,6 +54,7 @@ let Predicates = [HasStdExtZvabd] in {
 // CodeGen Patterns
 //===----------------------------------------------------------------------===//
 let HasPassthruOp = true, HasMaskOp = true in {
+def riscv_abs_vl  : RVSDNode<"ABS_VL", SDT_RISCVIntUnOp_VL>;
 def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
@@ -48,4 +65,13 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU">;
 
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD">;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU">;
+
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
+            (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
+                    (vti.Vector (IMPLICIT_DEF)),
+                    vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+}
+
+defm : VPatUnaryVL_V<riscv_abs_vl, "PseudoVABS", HasStdExtZvabd>;
 } // Predicates = [HasStdExtZvabd]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 2b94de914b995..3a5ddb8b2b994 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -719,26 +719,6 @@ defm : VPatBinarySDNode_VV_VX<clmulh, "PseudoVCLMULH", I64IntegerVectors, ExtraP
 // VL patterns
 //===----------------------------------------------------------------------===//
 
-multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
-                         Predicate predicate = HasStdExtZvbb> {
-  foreach vti = AllIntegerVectors in {
-    let Predicates = !listconcat([predicate],
-                                 GetVTypePredicates<vti>.Predicates) in {
-      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
-                                (vti.Vector vti.RegClass:$passthru),
-                                (vti.Mask VMV0:$vm),
-                                VLOpFrag)),
-                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
-                   vti.RegClass:$passthru,
-                   vti.RegClass:$rs1,
-                   (vti.Mask VMV0:$vm),
-                   GPR:$vl,
-                   vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
-    }
-  }
-}
-
 foreach vti = AllIntegerVectors in {
   let Predicates = !listconcat([HasStdExtZvkb],
                                GetVTypePredicates<vti>.Predicates) in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
index 7260cca64a476..868e6766fda00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-LABEL: vabs_nxv1i16:
@@ -9,6 +13,12 @@ define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
   ret <vscale x 1 x i16> %r
 }
@@ -20,6 +30,12 @@ define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
   ret <vscale x 2 x i16> %r
 }
@@ -31,6 +47,12 @@ define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
   ret <vscale x 4 x i16> %r
 }
@@ -42,6 +64,12 @@ define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
   ret <vscale x 8 x i16> %r
 }
@@ -53,6 +81,12 @@ define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
   ret <vscale x 16 x i16> %r
 }
@@ -64,6 +98,12 @@ define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
   ret <vscale x 32 x i16> %r
 }
@@ -75,6 +115,12 @@ define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
   ret <vscale x 1 x i32> %r
 }
@@ -86,6 +132,12 @@ define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
   ret <vscale x 2 x i32> %r
 }
@@ -97,6 +149,12 @@ define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
   ret <vscale x 4 x i32> %r
 }
@@ -108,6 +166,12 @@ define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
   ret <vscale x 8 x i32> %r
 }
@@ -119,6 +183,12 @@ define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
   ret <vscale x 16 x i32> %r
 }
@@ -130,6 +200,12 @@ define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
   ret <vscale x 1 x i64> %r
 }
@@ -141,6 +217,12 @@ define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
   ret <vscale x 2 x i64> %r
 }
@@ -152,6 +234,12 @@ define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
   ret <vscale x 4 x i64> %r
 }
@@ -163,6 +251,12 @@ define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
   ret <vscale x 8 x i64> %r
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 5b215c5173211..684c9abb37353 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_abs_nxv1i8:
@@ -11,6 +15,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -22,6 +32,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -33,6 +49,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -44,6 +66,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -55,6 +83,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -66,6 +100,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -77,6 +117,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -88,6 +134,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -99,6 +151,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
...
[truncated]

``````````
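
For the VP path, the new `RISCVISD::ABS_VL` node carries the passthru, mask, and VL operands, so a masked `llvm.vp.abs` also selects to a single `vabs.v` (adapted from the updated `abs-vp.ll` checks above):

```llvm
define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVABD:         vsetvli zero, a0, e8, mf8, ta, ma
; ZVABD-NEXT:    vabs.v v8, v8, v0.t
; ZVABD-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}
```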

</details>


https://github.com/llvm/llvm-project/pull/180142