https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/180141

>From 4e6282a12cf7dcd830dd90aad35268d04ef5fc97 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <[email protected]>
Date: Fri, 6 Feb 2026 18:01:13 +0800
Subject: [PATCH] Limit SEW to 8/16

Created using spr 1.3.6-beta.1
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 18 ++++--
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  | 13 +++--
 llvm/test/CodeGen/RISCV/rvv/abd.ll            | 32 ++++++++---
 .../CodeGen/RISCV/rvv/fixed-vectors-abd.ll    | 56 ++++++++++++++-----
 4 files changed, 88 insertions(+), 31 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6e236fcdae82d..387dc2ba5f388 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -995,9 +995,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                          Legal);
 
-      if (Subtarget.hasStdExtZvabd())
-        setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
-      else
+      if (Subtarget.hasStdExtZvabd()) {
+        // Only SEW=8/16 are supported in Zvabd.
+        if (VT.getVectorElementType() == MVT::i8 ||
+            VT.getVectorElementType() == MVT::i16)
+          setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+        else
+          setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+      } else
         setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
 
       // Custom-lower extensions and truncations from/to mask types.
@@ -8819,11 +8824,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerToScalableOp(Op, DAG);
   case ISD::ABDS:
   case ISD::ABDU: {
-    if (Subtarget.hasStdExtZvabd())
+    EVT VT = Op->getValueType(0);
+    // Only SEW=8/16 are supported in Zvabd.
+    if (Subtarget.hasStdExtZvabd() && VT.isVector() &&
+        (VT.getVectorElementType() == MVT::i8 ||
+         VT.getVectorElementType() == MVT::i16))
       return lowerToScalableOp(Op, DAG);
 
     SDLoc dl(Op);
-    EVT VT = Op->getValueType(0);
     SDValue LHS = DAG.getFreeze(Op->getOperand(0));
     SDValue RHS = DAG.getFreeze(Op->getOperand(1));
     bool IsSigned = Op->getOpcode() == ISD::ABDS;
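
For reference, the fallback path taken here (and now also taken for SEW>16 even with +zvabd) freezes the operands and, as the updated CHECK lines below show, ends up as a min/max pair followed by a subtract. A minimal IR sketch in the style of the existing sabd_s test that is now expected to hit this expansion; the function name is illustrative and not part of this patch:

define <vscale x 4 x i32> @sabd_s_sketch(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  ; Same sext/sub/abs/trunc shape as sabd_s in abd.ll; with SEW=32 this no
  ; longer maps to vabd.vv and should lower via vmin.vv/vmax.vv/vsub.vv.
  %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
  %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %trunc
}
declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)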
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index c73d0f20a7e8b..fa7b188fc7325 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -42,10 +42,15 @@ def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
 
+// These instructions are defined for SEW=8 and SEW=16, otherwise the instruction
+// encoding is reserved.
+defvar ABDIntVectors = !filter(vti, AllIntegerVectors, !or(!eq(vti.SEW, 8),
+                                                           !eq(vti.SEW, 16)));
+
 let Predicates = [HasStdExtZvabd] in {
-defm : VPatBinarySDNode_VV<abds, "PseudoVABD">;
-defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU">;
+defm : VPatBinarySDNode_VV<abds, "PseudoVABD", ABDIntVectors>;
+defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
 
-defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD">;
-defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU">;
+defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
+defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
 } // Predicates = [HasStdExtZvabd]
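
With the patterns filtered to ABDIntVectors, SEW=8/16 inputs should keep selecting vabd.vv/vabdu.vv. As an illustration only (this sketch is not one of the tests touched by this patch, and the function name is made up), an i8 signed case of the same shape:

define <vscale x 8 x i8> @sabd_b_sketch(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
  ; i8 elements stay within the SEW=8/16 limit, so ABDS remains legal and the
  ; restricted patterns are still expected to select vabd.vv here.
  %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  %trunc = trunc <vscale x 8 x i16> %abs to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %trunc
}
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)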
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index 837e53af5578e..c451559a29a69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -112,7 +112,9 @@ define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 ; ZVABD-LABEL: sabd_s:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    vmin.vv v12, v8, v10
+; ZVABD-NEXT:    vmax.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
 ; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
@@ -159,7 +161,9 @@ define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 ; ZVABD-LABEL: sabd_d:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    vmin.vv v12, v8, v10
+; ZVABD-NEXT:    vmax.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
 ; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
@@ -183,7 +187,9 @@ define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; ZVABD-LABEL: sabd_d_promoted_ops:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v10, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v10, v8, v10
 ; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v8, v10
 ; ZVABD-NEXT:    ret
@@ -302,7 +308,9 @@ define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 ; ZVABD-LABEL: uabd_s:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    vminu.vv v12, v8, v10
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
 ; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
@@ -349,7 +357,9 @@ define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 ; ZVABD-LABEL: uabd_d:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    vminu.vv v12, v8, v10
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
 ; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
@@ -373,7 +383,9 @@ define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; ZVABD-LABEL: uabd_d_promoted_ops:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v10, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v10, v8, v10
 ; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v8, v10
 ; ZVABD-NEXT:    ret
@@ -400,7 +412,9 @@ define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <v
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf4 v12, v10
-; ZVABD-NEXT:    vabdu.vv v8, v8, v12
+; ZVABD-NEXT:    vminu.vv v10, v8, v12
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v12
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
@@ -457,7 +471,9 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
 ; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf4 v10, v8
 ; ZVABD-NEXT:    vsext.vf4 v12, v9
-; ZVABD-NEXT:    vabd.vv v8, v10, v12
+; ZVABD-NEXT:    vmin.vv v8, v10, v12
+; ZVABD-NEXT:    vmax.vv v10, v10, v12
+; ZVABD-NEXT:    vsub.vv v8, v10, v8
 ; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
   %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index ce4d3f9392697..998668dc26bb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -187,7 +187,9 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
 ; ZVABD-LABEL: sabd_2s:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i32> %a to <2 x i64>
   %b.sext = sext <2 x i32> %b to <2 x i64>
@@ -236,7 +238,9 @@ define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
 ; ZVABD-LABEL: sabd_4s:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i32> %a to <4 x i64>
   %b.sext = sext <4 x i32> %b to <4 x i64>
@@ -284,7 +288,9 @@ define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
 ; ZVABD-LABEL: sabd_2d:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i64> %a to <2 x i128>
   %b.sext = sext <2 x i64> %b to <2 x i128>
@@ -309,7 +315,9 @@ define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
 ; ZVABD-LABEL: sabd_2d_promoted_ops:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v9, v8, v10
 ; ZVABD-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v8, v9
 ; ZVABD-NEXT:    ret
@@ -481,7 +489,9 @@ define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
 ; ZVABD-LABEL: uabd_2s:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i32> %a to <2 x i64>
   %b.zext = zext <2 x i32> %b to <2 x i64>
@@ -530,7 +540,9 @@ define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
 ; ZVABD-LABEL: uabd_4s:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i32> %a to <4 x i64>
   %b.zext = zext <4 x i32> %b to <4 x i64>
@@ -578,7 +590,9 @@ define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
 ; ZVABD-LABEL: uabd_2d:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i64> %a to <2 x i128>
   %b.zext = zext <2 x i64> %b to <2 x i128>
@@ -603,7 +617,9 @@ define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
 ; ZVABD-LABEL: uabd_2d_promoted_ops:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v9, v8, v10
 ; ZVABD-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v8, v9
 ; ZVABD-NEXT:    ret
@@ -755,7 +771,9 @@ define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
 ; ZVABD-LABEL: sabd_v4i32_nsw:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %sub = sub nsw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
@@ -775,7 +793,9 @@ define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
 ; ZVABD-LABEL: sabd_v2i64_nsw:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %sub = sub nsw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
@@ -837,7 +857,9 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 ; ZVABD-LABEL: smaxmin_v4i32:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
   %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
@@ -858,7 +880,9 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; ZVABD-LABEL: smaxmin_v2i64:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
@@ -921,7 +945,9 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 ; ZVABD-LABEL: umaxmin_v4i32:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
   %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
@@ -942,7 +968,9 @@ define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; ZVABD-LABEL: umaxmin_v2i64:
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
 ; ZVABD-NEXT:    ret
   %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
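
The smaxmin/umaxmin tests above exercise the sub(max, min) form, which the DAG combines to ABDS/ABDU before the new legality check applies. A sketch of the same form with i16 elements, which stay within the SEW=8/16 limit and so are still expected to produce vabd.vv; the function is illustrative and not part of this patch:

define <8 x i16> @smaxmin_v8i16_sketch(<8 x i16> %0, <8 x i16> %1) {
  ; sub(smax, smin) is recognized as a signed absolute difference; with i16
  ; elements the Zvabd patterns still apply after this change.
  %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
  %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
  %sub = sub <8 x i16> %a, %b
  ret <8 x i16> %sub
}
declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)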
